/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL.
 * The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
555 */ 556 if (!vrr_active) 557 drm_crtc_handle_vblank(&acrtc->base); 558 559 /** 560 * Following stuff must happen at start of vblank, for crc 561 * computation and below-the-range btr support in vrr mode. 562 */ 563 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); 564 565 /* BTR updates need to happen before VUPDATE on Vega and above. */ 566 if (adev->family < AMDGPU_FAMILY_AI) 567 return; 568 569 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 570 571 if (acrtc->dm_irq_params.stream && 572 acrtc->dm_irq_params.vrr_params.supported && 573 acrtc->dm_irq_params.freesync_config.state == 574 VRR_STATE_ACTIVE_VARIABLE) { 575 mod_freesync_handle_v_update(adev->dm.freesync_module, 576 acrtc->dm_irq_params.stream, 577 &acrtc->dm_irq_params.vrr_params); 578 579 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream, 580 &acrtc->dm_irq_params.vrr_params.adjust); 581 } 582 583 /* 584 * If there aren't any active_planes then DCH HUBP may be clock-gated. 585 * In that case, pageflip completion interrupts won't fire and pageflip 586 * completion events won't get delivered. Prevent this by sending 587 * pending pageflip events from here if a flip is still pending. 588 * 589 * If any planes are enabled, use dm_pflip_high_irq() instead, to 590 * avoid race conditions between flip programming and completion, 591 * which could cause too early flip completion events. 592 */ 593 if (adev->family >= AMDGPU_FAMILY_RV && 594 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED && 595 acrtc->dm_irq_params.active_planes == 0) { 596 if (acrtc->event) { 597 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event); 598 acrtc->event = NULL; 599 drm_crtc_vblank_put(&acrtc->base); 600 } 601 acrtc->pflip_status = AMDGPU_FLIP_NONE; 602 } 603 604 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 605 } 606 607 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 608 /** 609 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for 610 * DCN generation ASICs 611 * @interrupt_params: interrupt parameters 612 * 613 * Used to set crc window/read out crc value at vertical line 0 position 614 */ 615 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) 616 { 617 struct common_irq_params *irq_params = interrupt_params; 618 struct amdgpu_device *adev = irq_params->adev; 619 struct amdgpu_crtc *acrtc; 620 621 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0); 622 623 if (!acrtc) 624 return; 625 626 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base); 627 } 628 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ 629 630 /** 631 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command. 632 * @adev: amdgpu_device pointer 633 * @notify: dmub notification structure 634 * 635 * Dmub AUX or SET_CONFIG command completion processing callback 636 * Copies dmub notification to DM which is to be read by AUX command. 637 * issuing thread and also signals the event to wake up the thread. 638 */ 639 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev, 640 struct dmub_notification *notify) 641 { 642 if (adev->dm.dmub_notify) 643 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); 644 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY) 645 complete(&adev->dm.dmub_aux_transfer_done); 646 } 647 648 /** 649 * dmub_hpd_callback - DMUB HPD interrupt processing callback. 650 * @adev: amdgpu_device pointer 651 * @notify: dmub notification structure 652 * 653 * Dmub Hpd interrupt processing callback. 
Gets displayindex through the 654 * ink index and calls helper to do the processing. 655 */ 656 static void dmub_hpd_callback(struct amdgpu_device *adev, 657 struct dmub_notification *notify) 658 { 659 struct amdgpu_dm_connector *aconnector; 660 struct amdgpu_dm_connector *hpd_aconnector = NULL; 661 struct drm_connector *connector; 662 struct drm_connector_list_iter iter; 663 struct dc_link *link; 664 uint8_t link_index = 0; 665 struct drm_device *dev; 666 667 if (adev == NULL) 668 return; 669 670 if (notify == NULL) { 671 DRM_ERROR("DMUB HPD callback notification was NULL"); 672 return; 673 } 674 675 if (notify->link_index > adev->dm.dc->link_count) { 676 DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index); 677 return; 678 } 679 680 link_index = notify->link_index; 681 link = adev->dm.dc->links[link_index]; 682 dev = adev->dm.ddev; 683 684 drm_connector_list_iter_begin(dev, &iter); 685 drm_for_each_connector_iter(connector, &iter) { 686 aconnector = to_amdgpu_dm_connector(connector); 687 if (link && aconnector->dc_link == link) { 688 DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index); 689 hpd_aconnector = aconnector; 690 break; 691 } 692 } 693 drm_connector_list_iter_end(&iter); 694 695 if (hpd_aconnector) { 696 if (notify->type == DMUB_NOTIFICATION_HPD) 697 handle_hpd_irq_helper(hpd_aconnector); 698 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) 699 handle_hpd_rx_irq(hpd_aconnector); 700 } 701 } 702 703 /** 704 * register_dmub_notify_callback - Sets callback for DMUB notify 705 * @adev: amdgpu_device pointer 706 * @type: Type of dmub notification 707 * @callback: Dmub interrupt callback function 708 * @dmub_int_thread_offload: offload indicator 709 * 710 * API to register a dmub callback handler for a dmub notification 711 * Also sets indicator whether callback processing to be offloaded. 712 * to dmub interrupt handling thread 713 * Return: true if successfully registered, false if there is existing registration 714 */ 715 static bool register_dmub_notify_callback(struct amdgpu_device *adev, 716 enum dmub_notification_type type, 717 dmub_notify_interrupt_callback_t callback, 718 bool dmub_int_thread_offload) 719 { 720 if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) { 721 adev->dm.dmub_callback[type] = callback; 722 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload; 723 } else 724 return false; 725 726 return true; 727 } 728 729 static void dm_handle_hpd_work(struct work_struct *work) 730 { 731 struct dmub_hpd_work *dmub_hpd_wrk; 732 733 dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work); 734 735 if (!dmub_hpd_wrk->dmub_notify) { 736 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL"); 737 return; 738 } 739 740 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) { 741 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev, 742 dmub_hpd_wrk->dmub_notify); 743 } 744 745 kfree(dmub_hpd_wrk->dmub_notify); 746 kfree(dmub_hpd_wrk); 747 748 } 749 750 #define DMUB_TRACE_MAX_READ 64 751 /** 752 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt 753 * @interrupt_params: used for determining the Outbox instance 754 * 755 * Handles the Outbox Interrupt 756 * event handler. 
757 */ 758 static void dm_dmub_outbox1_low_irq(void *interrupt_params) 759 { 760 struct dmub_notification notify; 761 struct common_irq_params *irq_params = interrupt_params; 762 struct amdgpu_device *adev = irq_params->adev; 763 struct amdgpu_display_manager *dm = &adev->dm; 764 struct dmcub_trace_buf_entry entry = { 0 }; 765 uint32_t count = 0; 766 struct dmub_hpd_work *dmub_hpd_wrk; 767 struct dc_link *plink = NULL; 768 769 if (dc_enable_dmub_notifications(adev->dm.dc) && 770 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { 771 772 do { 773 dc_stat_get_dmub_notification(adev->dm.dc, ¬ify); 774 if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) { 775 DRM_ERROR("DM: notify type %d invalid!", notify.type); 776 continue; 777 } 778 if (!dm->dmub_callback[notify.type]) { 779 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type); 780 continue; 781 } 782 if (dm->dmub_thread_offload[notify.type] == true) { 783 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); 784 if (!dmub_hpd_wrk) { 785 DRM_ERROR("Failed to allocate dmub_hpd_wrk"); 786 return; 787 } 788 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC); 789 if (!dmub_hpd_wrk->dmub_notify) { 790 kfree(dmub_hpd_wrk); 791 DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify"); 792 return; 793 } 794 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); 795 if (dmub_hpd_wrk->dmub_notify) 796 memcpy(dmub_hpd_wrk->dmub_notify, ¬ify, sizeof(struct dmub_notification)); 797 dmub_hpd_wrk->adev = adev; 798 if (notify.type == DMUB_NOTIFICATION_HPD) { 799 plink = adev->dm.dc->links[notify.link_index]; 800 if (plink) { 801 plink->hpd_status = 802 notify.hpd_status == DP_HPD_PLUG; 803 } 804 } 805 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); 806 } else { 807 dm->dmub_callback[notify.type](adev, ¬ify); 808 } 809 } while (notify.pending_notification); 810 } 811 812 813 do { 814 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { 815 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, 816 entry.param0, entry.param1); 817 818 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", 819 entry.trace_code, entry.tick_count, entry.param0, entry.param1); 820 } else 821 break; 822 823 count++; 824 825 } while (count <= DMUB_TRACE_MAX_READ); 826 827 if (count > DMUB_TRACE_MAX_READ) 828 DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); 829 } 830 831 static int dm_set_clockgating_state(void *handle, 832 enum amd_clockgating_state state) 833 { 834 return 0; 835 } 836 837 static int dm_set_powergating_state(void *handle, 838 enum amd_powergating_state state) 839 { 840 return 0; 841 } 842 843 /* Prototypes of private functions */ 844 static int dm_early_init(void* handle); 845 846 /* Allocate memory for FBC compressed data */ 847 static void amdgpu_dm_fbc_init(struct drm_connector *connector) 848 { 849 struct drm_device *dev = connector->dev; 850 struct amdgpu_device *adev = drm_to_adev(dev); 851 struct dm_compressor_info *compressor = &adev->dm.compressor; 852 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector); 853 struct drm_display_mode *mode; 854 unsigned long max_size = 0; 855 856 if (adev->dm.dc->fbc_compressor == NULL) 857 return; 858 859 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP) 860 return; 861 862 if (compressor->bo_ptr) 863 return; 864 865 866 list_for_each_entry(mode, &connector->modes, head) { 867 if (max_size < mode->htotal * mode->vtotal) 868 max_size = mode->htotal * 
				   mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}

static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
		 * So here is the workaround that increases system aperture
		 * high address (add 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;


	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}

static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;


	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
	spin_lock_init(&adev->dm.vblank_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		switch (adev->dm.dmcub_fw_version) {
		case 0: /* development */
		case 0x1: /* linux-firmware.git hash 6d9f399 */
		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
			init_data.flags.disable_dmcu = false;
			break;
		default:
			init_data.flags.disable_dmcu = true;
		}
		break;
	case IP_VERSION(2, 0, 3):
		init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			/* enable S/G on PCO and RV2 */
			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
			    (adev->apu_flags & AMD_APU_IS_PICASSO))
				init_data.flags.gpu_vm_support = true;
			break;
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			init_data.flags.gpu_vm_support = true;
			break;
		default:
			break;
		}
		break;
	}

	if (init_data.flags.gpu_vm_support)
		adev->mode_info.gpu_vm_support = true;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;

	init_data.flags.seamless_boot_edp_requested = false;

	if (check_seamless_boot_capability(adev)) {
		init_data.flags.seamless_boot_edp_requested = true;
		init_data.flags.allow_seamless_boot_optimization = true;
		DRM_INFO("Seamless boot condition check passed\n");
	}

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
		adev->dm.dc->debug.disable_dsc = true;
		adev->dm.dc->debug.disable_dsc_edp = true;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
	if (!adev->dm.hpd_rx_offload_wq) {
		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
		goto error;
	}

	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
	}

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: fail to register dmub aux callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}


	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;
	}

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
		destroy_workqueue(adev->dm.delayed_hpd_wq);
		adev->dm.delayed_hpd_wq = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	if (adev->dm.hpd_rx_offload_wq) {
		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
			if (adev->dm.hpd_rx_offload_wq[i].wq) {
				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
			}
		}

		kfree(adev->dm.hpd_rx_offload_wq);
		adev->dm.hpd_rx_offload_wq = NULL;
	}

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

(adev->dm.cgs_device) { 1737 amdgpu_cgs_destroy_device(adev->dm.cgs_device); 1738 adev->dm.cgs_device = NULL; 1739 } 1740 if (adev->dm.freesync_module) { 1741 mod_freesync_destroy(adev->dm.freesync_module); 1742 adev->dm.freesync_module = NULL; 1743 } 1744 1745 mutex_destroy(&adev->dm.audio_lock); 1746 mutex_destroy(&adev->dm.dc_lock); 1747 1748 return; 1749 } 1750 1751 static int load_dmcu_fw(struct amdgpu_device *adev) 1752 { 1753 const char *fw_name_dmcu = NULL; 1754 int r; 1755 const struct dmcu_firmware_header_v1_0 *hdr; 1756 1757 switch(adev->asic_type) { 1758 #if defined(CONFIG_DRM_AMD_DC_SI) 1759 case CHIP_TAHITI: 1760 case CHIP_PITCAIRN: 1761 case CHIP_VERDE: 1762 case CHIP_OLAND: 1763 #endif 1764 case CHIP_BONAIRE: 1765 case CHIP_HAWAII: 1766 case CHIP_KAVERI: 1767 case CHIP_KABINI: 1768 case CHIP_MULLINS: 1769 case CHIP_TONGA: 1770 case CHIP_FIJI: 1771 case CHIP_CARRIZO: 1772 case CHIP_STONEY: 1773 case CHIP_POLARIS11: 1774 case CHIP_POLARIS10: 1775 case CHIP_POLARIS12: 1776 case CHIP_VEGAM: 1777 case CHIP_VEGA10: 1778 case CHIP_VEGA12: 1779 case CHIP_VEGA20: 1780 return 0; 1781 case CHIP_NAVI12: 1782 fw_name_dmcu = FIRMWARE_NAVI12_DMCU; 1783 break; 1784 case CHIP_RAVEN: 1785 if (ASICREV_IS_PICASSO(adev->external_rev_id)) 1786 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 1787 else if (ASICREV_IS_RAVEN2(adev->external_rev_id)) 1788 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 1789 else 1790 return 0; 1791 break; 1792 default: 1793 switch (adev->ip_versions[DCE_HWIP][0]) { 1794 case IP_VERSION(2, 0, 2): 1795 case IP_VERSION(2, 0, 3): 1796 case IP_VERSION(2, 0, 0): 1797 case IP_VERSION(2, 1, 0): 1798 case IP_VERSION(3, 0, 0): 1799 case IP_VERSION(3, 0, 2): 1800 case IP_VERSION(3, 0, 3): 1801 case IP_VERSION(3, 0, 1): 1802 case IP_VERSION(3, 1, 2): 1803 case IP_VERSION(3, 1, 3): 1804 case IP_VERSION(3, 1, 5): 1805 case IP_VERSION(3, 1, 6): 1806 return 0; 1807 default: 1808 break; 1809 } 1810 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 1811 return -EINVAL; 1812 } 1813 1814 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1815 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n"); 1816 return 0; 1817 } 1818 1819 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev); 1820 if (r == -ENOENT) { 1821 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ 1822 DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); 1823 adev->dm.fw_dmcu = NULL; 1824 return 0; 1825 } 1826 if (r) { 1827 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n", 1828 fw_name_dmcu); 1829 return r; 1830 } 1831 1832 r = amdgpu_ucode_validate(adev->dm.fw_dmcu); 1833 if (r) { 1834 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", 1835 fw_name_dmcu); 1836 release_firmware(adev->dm.fw_dmcu); 1837 adev->dm.fw_dmcu = NULL; 1838 return r; 1839 } 1840 1841 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; 1842 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; 1843 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; 1844 adev->firmware.fw_size += 1845 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 1846 1847 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; 1848 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; 1849 adev->firmware.fw_size += 1850 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 1851 1852 adev->dm.dmcu_fw_version = 
le32_to_cpu(hdr->header.ucode_version); 1853 1854 DRM_DEBUG_KMS("PSP loading DMCU firmware\n"); 1855 1856 return 0; 1857 } 1858 1859 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) 1860 { 1861 struct amdgpu_device *adev = ctx; 1862 1863 return dm_read_reg(adev->dm.dc->ctx, address); 1864 } 1865 1866 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, 1867 uint32_t value) 1868 { 1869 struct amdgpu_device *adev = ctx; 1870 1871 return dm_write_reg(adev->dm.dc->ctx, address, value); 1872 } 1873 1874 static int dm_dmub_sw_init(struct amdgpu_device *adev) 1875 { 1876 struct dmub_srv_create_params create_params; 1877 struct dmub_srv_region_params region_params; 1878 struct dmub_srv_region_info region_info; 1879 struct dmub_srv_fb_params fb_params; 1880 struct dmub_srv_fb_info *fb_info; 1881 struct dmub_srv *dmub_srv; 1882 const struct dmcub_firmware_header_v1_0 *hdr; 1883 const char *fw_name_dmub; 1884 enum dmub_asic dmub_asic; 1885 enum dmub_status status; 1886 int r; 1887 1888 switch (adev->ip_versions[DCE_HWIP][0]) { 1889 case IP_VERSION(2, 1, 0): 1890 dmub_asic = DMUB_ASIC_DCN21; 1891 fw_name_dmub = FIRMWARE_RENOIR_DMUB; 1892 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) 1893 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; 1894 break; 1895 case IP_VERSION(3, 0, 0): 1896 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) { 1897 dmub_asic = DMUB_ASIC_DCN30; 1898 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; 1899 } else { 1900 dmub_asic = DMUB_ASIC_DCN30; 1901 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; 1902 } 1903 break; 1904 case IP_VERSION(3, 0, 1): 1905 dmub_asic = DMUB_ASIC_DCN301; 1906 fw_name_dmub = FIRMWARE_VANGOGH_DMUB; 1907 break; 1908 case IP_VERSION(3, 0, 2): 1909 dmub_asic = DMUB_ASIC_DCN302; 1910 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; 1911 break; 1912 case IP_VERSION(3, 0, 3): 1913 dmub_asic = DMUB_ASIC_DCN303; 1914 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; 1915 break; 1916 case IP_VERSION(3, 1, 2): 1917 case IP_VERSION(3, 1, 3): 1918 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; 1919 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; 1920 break; 1921 case IP_VERSION(3, 1, 5): 1922 dmub_asic = DMUB_ASIC_DCN315; 1923 fw_name_dmub = FIRMWARE_DCN_315_DMUB; 1924 break; 1925 case IP_VERSION(3, 1, 6): 1926 dmub_asic = DMUB_ASIC_DCN316; 1927 fw_name_dmub = FIRMWARE_DCN316_DMUB; 1928 break; 1929 default: 1930 /* ASIC doesn't support DMUB. 
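* This is not an error: such ASICs simply run without DMUB firmware.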
*/
1931 return 0;
1932 }
1933
1934 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1935 if (r) {
1936 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1937 return 0;
1938 }
1939
1940 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1941 if (r) {
1942 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1943 return 0;
1944 }
1945
1946 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1947 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1948
1949 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1950 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1951 AMDGPU_UCODE_ID_DMCUB;
1952 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1953 adev->dm.dmub_fw;
1954 adev->firmware.fw_size +=
1955 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1956
1957 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1958 adev->dm.dmcub_fw_version);
1959 }
1960
1961
1962 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1963 dmub_srv = adev->dm.dmub_srv;
1964
1965 if (!dmub_srv) {
1966 DRM_ERROR("Failed to allocate DMUB service!\n");
1967 return -ENOMEM;
1968 }
1969
1970 memset(&create_params, 0, sizeof(create_params));
1971 create_params.user_ctx = adev;
1972 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1973 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1974 create_params.asic = dmub_asic;
1975
1976 /* Create the DMUB service. */
1977 status = dmub_srv_create(dmub_srv, &create_params);
1978 if (status != DMUB_STATUS_OK) {
1979 DRM_ERROR("Error creating DMUB service: %d\n", status);
1980 return -EINVAL;
1981 }
1982
1983 /* Calculate the size of all the regions for the DMUB service. */
1984 memset(&region_params, 0, sizeof(region_params));
1985
1986 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1987 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1988 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1989 region_params.vbios_size = adev->bios_size;
1990 region_params.fw_bss_data = region_params.bss_data_size ?
1991 adev->dm.dmub_fw->data +
1992 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1993 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1994 region_params.fw_inst_const =
1995 adev->dm.dmub_fw->data +
1996 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1997 PSP_HEADER_BYTES;
1998
1999 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2000 &region_info);
2001
2002 if (status != DMUB_STATUS_OK) {
2003 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2004 return -EINVAL;
2005 }
2006
2007 /*
2008 * Allocate a framebuffer based on the total size of all the regions.
2009 * TODO: Move this into GART.
2010 */
2011 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2012 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2013 &adev->dm.dmub_bo_gpu_addr,
2014 &adev->dm.dmub_bo_cpu_addr);
2015 if (r)
2016 return r;
2017
2018 /* Rebase the regions on the framebuffer address.
*/
2019 memset(&fb_params, 0, sizeof(fb_params));
2020 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2021 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2022 fb_params.region_info = &region_info;
2023
2024 adev->dm.dmub_fb_info =
2025 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2026 fb_info = adev->dm.dmub_fb_info;
2027
2028 if (!fb_info) {
2029 DRM_ERROR(
2030 "Failed to allocate framebuffer info for DMUB service!\n");
2031 return -ENOMEM;
2032 }
2033
2034 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2035 if (status != DMUB_STATUS_OK) {
2036 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2037 return -EINVAL;
2038 }
2039
2040 return 0;
2041 }
2042
2043 static int dm_sw_init(void *handle)
2044 {
2045 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2046 int r;
2047
2048 r = dm_dmub_sw_init(adev);
2049 if (r)
2050 return r;
2051
2052 return load_dmcu_fw(adev);
2053 }
2054
2055 static int dm_sw_fini(void *handle)
2056 {
2057 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2058
2059 kfree(adev->dm.dmub_fb_info);
2060 adev->dm.dmub_fb_info = NULL;
2061
2062 if (adev->dm.dmub_srv) {
2063 dmub_srv_destroy(adev->dm.dmub_srv);
2064 adev->dm.dmub_srv = NULL;
2065 }
2066
2067 release_firmware(adev->dm.dmub_fw);
2068 adev->dm.dmub_fw = NULL;
2069
2070 release_firmware(adev->dm.fw_dmcu);
2071 adev->dm.fw_dmcu = NULL;
2072
2073 return 0;
2074 }
2075
2076 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2077 {
2078 struct amdgpu_dm_connector *aconnector;
2079 struct drm_connector *connector;
2080 struct drm_connector_list_iter iter;
2081 int ret = 0;
2082
2083 drm_connector_list_iter_begin(dev, &iter);
2084 drm_for_each_connector_iter(connector, &iter) {
2085 aconnector = to_amdgpu_dm_connector(connector);
2086 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2087 aconnector->mst_mgr.aux) {
2088 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2089 aconnector,
2090 aconnector->base.base.id);
2091
2092 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2093 if (ret < 0) {
2094 DRM_ERROR("DM_MST: Failed to start MST\n");
2095 aconnector->dc_link->type =
2096 dc_connection_single;
2097 break;
2098 }
2099 }
2100 }
2101 drm_connector_list_iter_end(&iter);
2102
2103 return ret;
2104 }
2105
2106 static int dm_late_init(void *handle)
2107 {
2108 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2109
2110 struct dmcu_iram_parameters params;
2111 unsigned int linear_lut[16];
2112 int i;
2113 struct dmcu *dmcu = NULL;
2114
2115 dmcu = adev->dm.dc->res_pool->dmcu;
2116
2117 for (i = 0; i < 16; i++)
2118 linear_lut[i] = 0xFFFF * i / 15;
2119
2120 params.set = 0;
2121 params.backlight_ramping_override = false;
2122 params.backlight_ramping_start = 0xCCCC;
2123 params.backlight_ramping_reduction = 0xCCCCCCCC;
2124 params.backlight_lut_array_size = 16;
2125 params.backlight_lut_array = linear_lut;
2126
2127 /* Min backlight level after ABM reduction; don't allow below 1%.
2128 * 0xFFFF x 0.01 = 0x28F
2129 */
2130 params.min_abm_backlight = 0x28F;
2131 /* In the case where abm is implemented on dmcub,
2132 * dmcu object will be null.
2133 * ABM 2.4 and up are implemented on dmcub.
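* In that case the ABM parameters are programmed per eDP link through
* dmub_init_abm_config() below.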
2134 */ 2135 if (dmcu) { 2136 if (!dmcu_load_iram(dmcu, params)) 2137 return -EINVAL; 2138 } else if (adev->dm.dc->ctx->dmub_srv) { 2139 struct dc_link *edp_links[MAX_NUM_EDP]; 2140 int edp_num; 2141 2142 get_edp_links(adev->dm.dc, edp_links, &edp_num); 2143 for (i = 0; i < edp_num; i++) { 2144 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i)) 2145 return -EINVAL; 2146 } 2147 } 2148 2149 return detect_mst_link_for_all_connectors(adev_to_drm(adev)); 2150 } 2151 2152 static void s3_handle_mst(struct drm_device *dev, bool suspend) 2153 { 2154 struct amdgpu_dm_connector *aconnector; 2155 struct drm_connector *connector; 2156 struct drm_connector_list_iter iter; 2157 struct drm_dp_mst_topology_mgr *mgr; 2158 int ret; 2159 bool need_hotplug = false; 2160 2161 drm_connector_list_iter_begin(dev, &iter); 2162 drm_for_each_connector_iter(connector, &iter) { 2163 aconnector = to_amdgpu_dm_connector(connector); 2164 if (aconnector->dc_link->type != dc_connection_mst_branch || 2165 aconnector->mst_port) 2166 continue; 2167 2168 mgr = &aconnector->mst_mgr; 2169 2170 if (suspend) { 2171 drm_dp_mst_topology_mgr_suspend(mgr); 2172 } else { 2173 ret = drm_dp_mst_topology_mgr_resume(mgr, true); 2174 if (ret < 0) { 2175 drm_dp_mst_topology_mgr_set_mst(mgr, false); 2176 need_hotplug = true; 2177 } 2178 } 2179 } 2180 drm_connector_list_iter_end(&iter); 2181 2182 if (need_hotplug) 2183 drm_kms_helper_hotplug_event(dev); 2184 } 2185 2186 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) 2187 { 2188 int ret = 0; 2189 2190 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends 2191 * on window driver dc implementation. 2192 * For Navi1x, clock settings of dcn watermarks are fixed. the settings 2193 * should be passed to smu during boot up and resume from s3. 2194 * boot up: dc calculate dcn watermark clock settings within dc_create, 2195 * dcn20_resource_construct 2196 * then call pplib functions below to pass the settings to smu: 2197 * smu_set_watermarks_for_clock_ranges 2198 * smu_set_watermarks_table 2199 * navi10_set_watermarks_table 2200 * smu_write_watermarks_table 2201 * 2202 * For Renoir, clock settings of dcn watermark are also fixed values. 2203 * dc has implemented different flow for window driver: 2204 * dc_hardware_init / dc_set_power_state 2205 * dcn10_init_hw 2206 * notify_wm_ranges 2207 * set_wm_ranges 2208 * -- Linux 2209 * smu_set_watermarks_for_clock_ranges 2210 * renoir_set_watermarks_table 2211 * smu_write_watermarks_table 2212 * 2213 * For Linux, 2214 * dc_hardware_init -> amdgpu_dm_init 2215 * dc_set_power_state --> dm_resume 2216 * 2217 * therefore, this function apply to navi10/12/14 but not Renoir 2218 * * 2219 */ 2220 switch (adev->ip_versions[DCE_HWIP][0]) { 2221 case IP_VERSION(2, 0, 2): 2222 case IP_VERSION(2, 0, 0): 2223 break; 2224 default: 2225 return 0; 2226 } 2227 2228 ret = amdgpu_dpm_write_watermarks_table(adev); 2229 if (ret) { 2230 DRM_ERROR("Failed to update WMTABLE!\n"); 2231 return ret; 2232 } 2233 2234 return 0; 2235 } 2236 2237 /** 2238 * dm_hw_init() - Initialize DC device 2239 * @handle: The base driver device containing the amdgpu_dm device. 2240 * 2241 * Initialize the &struct amdgpu_display_manager device. This involves calling 2242 * the initializers of each DM component, then populating the struct with them. 2243 * 2244 * Although the function implies hardware initialization, both hardware and 2245 * software are initialized here. Splitting them out to their relevant init 2246 * hooks is a future TODO item. 
2247 * 2248 * Some notable things that are initialized here: 2249 * 2250 * - Display Core, both software and hardware 2251 * - DC modules that we need (freesync and color management) 2252 * - DRM software states 2253 * - Interrupt sources and handlers 2254 * - Vblank support 2255 * - Debug FS entries, if enabled 2256 */ 2257 static int dm_hw_init(void *handle) 2258 { 2259 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2260 /* Create DAL display manager */ 2261 amdgpu_dm_init(adev); 2262 amdgpu_dm_hpd_init(adev); 2263 2264 return 0; 2265 } 2266 2267 /** 2268 * dm_hw_fini() - Teardown DC device 2269 * @handle: The base driver device containing the amdgpu_dm device. 2270 * 2271 * Teardown components within &struct amdgpu_display_manager that require 2272 * cleanup. This involves cleaning up the DRM device, DC, and any modules that 2273 * were loaded. Also flush IRQ workqueues and disable them. 2274 */ 2275 static int dm_hw_fini(void *handle) 2276 { 2277 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2278 2279 amdgpu_dm_hpd_fini(adev); 2280 2281 amdgpu_dm_irq_fini(adev); 2282 amdgpu_dm_fini(adev); 2283 return 0; 2284 } 2285 2286 2287 static int dm_enable_vblank(struct drm_crtc *crtc); 2288 static void dm_disable_vblank(struct drm_crtc *crtc); 2289 2290 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, 2291 struct dc_state *state, bool enable) 2292 { 2293 enum dc_irq_source irq_source; 2294 struct amdgpu_crtc *acrtc; 2295 int rc = -EBUSY; 2296 int i = 0; 2297 2298 for (i = 0; i < state->stream_count; i++) { 2299 acrtc = get_crtc_by_otg_inst( 2300 adev, state->stream_status[i].primary_otg_inst); 2301 2302 if (acrtc && state->stream_status[i].plane_count != 0) { 2303 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; 2304 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; 2305 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", 2306 acrtc->crtc_id, enable ? "en" : "dis", rc); 2307 if (rc) 2308 DRM_WARN("Failed to %s pflip interrupts\n", 2309 enable ? 
"enable" : "disable"); 2310 2311 if (enable) { 2312 rc = dm_enable_vblank(&acrtc->base); 2313 if (rc) 2314 DRM_WARN("Failed to enable vblank interrupts\n"); 2315 } else { 2316 dm_disable_vblank(&acrtc->base); 2317 } 2318 2319 } 2320 } 2321 2322 } 2323 2324 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) 2325 { 2326 struct dc_state *context = NULL; 2327 enum dc_status res = DC_ERROR_UNEXPECTED; 2328 int i; 2329 struct dc_stream_state *del_streams[MAX_PIPES]; 2330 int del_streams_count = 0; 2331 2332 memset(del_streams, 0, sizeof(del_streams)); 2333 2334 context = dc_create_state(dc); 2335 if (context == NULL) 2336 goto context_alloc_fail; 2337 2338 dc_resource_state_copy_construct_current(dc, context); 2339 2340 /* First remove from context all streams */ 2341 for (i = 0; i < context->stream_count; i++) { 2342 struct dc_stream_state *stream = context->streams[i]; 2343 2344 del_streams[del_streams_count++] = stream; 2345 } 2346 2347 /* Remove all planes for removed streams and then remove the streams */ 2348 for (i = 0; i < del_streams_count; i++) { 2349 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { 2350 res = DC_FAIL_DETACH_SURFACES; 2351 goto fail; 2352 } 2353 2354 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); 2355 if (res != DC_OK) 2356 goto fail; 2357 } 2358 2359 res = dc_commit_state(dc, context); 2360 2361 fail: 2362 dc_release_state(context); 2363 2364 context_alloc_fail: 2365 return res; 2366 } 2367 2368 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) 2369 { 2370 int i; 2371 2372 if (dm->hpd_rx_offload_wq) { 2373 for (i = 0; i < dm->dc->caps.max_links; i++) 2374 flush_workqueue(dm->hpd_rx_offload_wq[i].wq); 2375 } 2376 } 2377 2378 static int dm_suspend(void *handle) 2379 { 2380 struct amdgpu_device *adev = handle; 2381 struct amdgpu_display_manager *dm = &adev->dm; 2382 int ret = 0; 2383 2384 if (amdgpu_in_reset(adev)) { 2385 mutex_lock(&dm->dc_lock); 2386 2387 dc_allow_idle_optimizations(adev->dm.dc, false); 2388 2389 dm->cached_dc_state = dc_copy_state(dm->dc->current_state); 2390 2391 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); 2392 2393 amdgpu_dm_commit_zero_streams(dm->dc); 2394 2395 amdgpu_dm_irq_suspend(adev); 2396 2397 hpd_rx_irq_work_suspend(dm); 2398 2399 return ret; 2400 } 2401 2402 WARN_ON(adev->dm.cached_state); 2403 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); 2404 2405 s3_handle_mst(adev_to_drm(adev), true); 2406 2407 amdgpu_dm_irq_suspend(adev); 2408 2409 hpd_rx_irq_work_suspend(dm); 2410 2411 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); 2412 2413 return 0; 2414 } 2415 2416 struct amdgpu_dm_connector * 2417 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, 2418 struct drm_crtc *crtc) 2419 { 2420 uint32_t i; 2421 struct drm_connector_state *new_con_state; 2422 struct drm_connector *connector; 2423 struct drm_crtc *crtc_from_state; 2424 2425 for_each_new_connector_in_state(state, connector, new_con_state, i) { 2426 crtc_from_state = new_con_state->crtc; 2427 2428 if (crtc_from_state == crtc) 2429 return to_amdgpu_dm_connector(connector); 2430 } 2431 2432 return NULL; 2433 } 2434 2435 static void emulated_link_detect(struct dc_link *link) 2436 { 2437 struct dc_sink_init_data sink_init_data = { 0 }; 2438 struct display_sink_capability sink_caps = { 0 }; 2439 enum dc_edid_status edid_status; 2440 struct dc_context *dc_ctx = link->ctx; 2441 struct dc_sink *sink = NULL; 2442 struct dc_sink *prev_sink = NULL; 2443 
2444 link->type = dc_connection_none; 2445 prev_sink = link->local_sink; 2446 2447 if (prev_sink) 2448 dc_sink_release(prev_sink); 2449 2450 switch (link->connector_signal) { 2451 case SIGNAL_TYPE_HDMI_TYPE_A: { 2452 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2453 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; 2454 break; 2455 } 2456 2457 case SIGNAL_TYPE_DVI_SINGLE_LINK: { 2458 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2459 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; 2460 break; 2461 } 2462 2463 case SIGNAL_TYPE_DVI_DUAL_LINK: { 2464 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2465 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; 2466 break; 2467 } 2468 2469 case SIGNAL_TYPE_LVDS: { 2470 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2471 sink_caps.signal = SIGNAL_TYPE_LVDS; 2472 break; 2473 } 2474 2475 case SIGNAL_TYPE_EDP: { 2476 sink_caps.transaction_type = 2477 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 2478 sink_caps.signal = SIGNAL_TYPE_EDP; 2479 break; 2480 } 2481 2482 case SIGNAL_TYPE_DISPLAY_PORT: { 2483 sink_caps.transaction_type = 2484 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 2485 sink_caps.signal = SIGNAL_TYPE_VIRTUAL; 2486 break; 2487 } 2488 2489 default: 2490 DC_ERROR("Invalid connector type! signal:%d\n", 2491 link->connector_signal); 2492 return; 2493 } 2494 2495 sink_init_data.link = link; 2496 sink_init_data.sink_signal = sink_caps.signal; 2497 2498 sink = dc_sink_create(&sink_init_data); 2499 if (!sink) { 2500 DC_ERROR("Failed to create sink!\n"); 2501 return; 2502 } 2503 2504 /* dc_sink_create returns a new reference */ 2505 link->local_sink = sink; 2506 2507 edid_status = dm_helpers_read_local_edid( 2508 link->ctx, 2509 link, 2510 sink); 2511 2512 if (edid_status != EDID_OK) 2513 DC_ERROR("Failed to read EDID"); 2514 2515 } 2516 2517 static void dm_gpureset_commit_state(struct dc_state *dc_state, 2518 struct amdgpu_display_manager *dm) 2519 { 2520 struct { 2521 struct dc_surface_update surface_updates[MAX_SURFACES]; 2522 struct dc_plane_info plane_infos[MAX_SURFACES]; 2523 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 2524 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 2525 struct dc_stream_update stream_update; 2526 } * bundle; 2527 int k, m; 2528 2529 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 2530 2531 if (!bundle) { 2532 dm_error("Failed to allocate update bundle\n"); 2533 goto cleanup; 2534 } 2535 2536 for (k = 0; k < dc_state->stream_count; k++) { 2537 bundle->stream_update.stream = dc_state->streams[k]; 2538 2539 for (m = 0; m < dc_state->stream_status->plane_count; m++) { 2540 bundle->surface_updates[m].surface = 2541 dc_state->stream_status->plane_states[m]; 2542 bundle->surface_updates[m].surface->force_full_update = 2543 true; 2544 } 2545 dc_commit_updates_for_stream( 2546 dm->dc, bundle->surface_updates, 2547 dc_state->stream_status->plane_count, 2548 dc_state->streams[k], &bundle->stream_update, dc_state); 2549 } 2550 2551 cleanup: 2552 kfree(bundle); 2553 2554 return; 2555 } 2556 2557 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state) 2558 { 2559 struct dc_stream_state *stream_state; 2560 struct amdgpu_dm_connector *aconnector = link->priv; 2561 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); 2562 struct dc_stream_update stream_update; 2563 bool dpms_off = true; 2564 2565 memset(&stream_update, 0, sizeof(stream_update)); 2566 stream_update.dpms_off = &dpms_off; 2567 2568 mutex_lock(&adev->dm.dc_lock); 2569 stream_state = dc_stream_find_from_link(link); 2570 2571 if 
(stream_state == NULL) { 2572 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n"); 2573 mutex_unlock(&adev->dm.dc_lock); 2574 return; 2575 } 2576 2577 stream_update.stream = stream_state; 2578 acrtc_state->force_dpms_off = true; 2579 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0, 2580 stream_state, &stream_update, 2581 stream_state->ctx->dc->current_state); 2582 mutex_unlock(&adev->dm.dc_lock); 2583 } 2584 2585 static int dm_resume(void *handle) 2586 { 2587 struct amdgpu_device *adev = handle; 2588 struct drm_device *ddev = adev_to_drm(adev); 2589 struct amdgpu_display_manager *dm = &adev->dm; 2590 struct amdgpu_dm_connector *aconnector; 2591 struct drm_connector *connector; 2592 struct drm_connector_list_iter iter; 2593 struct drm_crtc *crtc; 2594 struct drm_crtc_state *new_crtc_state; 2595 struct dm_crtc_state *dm_new_crtc_state; 2596 struct drm_plane *plane; 2597 struct drm_plane_state *new_plane_state; 2598 struct dm_plane_state *dm_new_plane_state; 2599 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); 2600 enum dc_connection_type new_connection_type = dc_connection_none; 2601 struct dc_state *dc_state; 2602 int i, r, j; 2603 2604 if (amdgpu_in_reset(adev)) { 2605 dc_state = dm->cached_dc_state; 2606 2607 /* 2608 * The dc->current_state is backed up into dm->cached_dc_state 2609 * before we commit 0 streams. 2610 * 2611 * DC will clear link encoder assignments on the real state 2612 * but the changes won't propagate over to the copy we made 2613 * before the 0 streams commit. 2614 * 2615 * DC expects that link encoder assignments are *not* valid 2616 * when committing a state, so as a workaround we can copy 2617 * off of the current state. 2618 * 2619 * We lose the previous assignments, but we had already 2620 * commit 0 streams anyway. 2621 */ 2622 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); 2623 2624 if (dc_enable_dmub_notifications(adev->dm.dc)) 2625 amdgpu_dm_outbox_init(adev); 2626 2627 r = dm_dmub_hw_init(adev); 2628 if (r) 2629 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 2630 2631 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 2632 dc_resume(dm->dc); 2633 2634 amdgpu_dm_irq_resume_early(adev); 2635 2636 for (i = 0; i < dc_state->stream_count; i++) { 2637 dc_state->streams[i]->mode_changed = true; 2638 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { 2639 dc_state->stream_status[i].plane_states[j]->update_flags.raw 2640 = 0xffffffff; 2641 } 2642 } 2643 2644 WARN_ON(!dc_commit_state(dm->dc, dc_state)); 2645 2646 dm_gpureset_commit_state(dm->cached_dc_state, dm); 2647 2648 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); 2649 2650 dc_release_state(dm->cached_dc_state); 2651 dm->cached_dc_state = NULL; 2652 2653 amdgpu_dm_irq_resume_late(adev); 2654 2655 mutex_unlock(&dm->dc_lock); 2656 2657 return 0; 2658 } 2659 /* Recreate dc_state - DC invalidates it when setting power state to S3. */ 2660 dc_release_state(dm_state->context); 2661 dm_state->context = dc_create_state(dm->dc); 2662 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ 2663 dc_resource_state_construct(dm->dc, dm_state->context); 2664 2665 /* Re-enable outbox interrupts for DPIA. */ 2666 if (dc_enable_dmub_notifications(adev->dm.dc)) 2667 amdgpu_dm_outbox_init(adev); 2668 2669 /* Before powering on DC we need to re-initialize DMUB. 
*/ 2670 dm_dmub_hw_resume(adev); 2671 2672 /* power on hardware */ 2673 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 2674 2675 /* program HPD filter */ 2676 dc_resume(dm->dc); 2677 2678 /* 2679 * early enable HPD Rx IRQ, should be done before set mode as short 2680 * pulse interrupts are used for MST 2681 */ 2682 amdgpu_dm_irq_resume_early(adev); 2683 2684 /* On resume we need to rewrite the MSTM control bits to enable MST*/ 2685 s3_handle_mst(ddev, false); 2686 2687 /* Do detection*/ 2688 drm_connector_list_iter_begin(ddev, &iter); 2689 drm_for_each_connector_iter(connector, &iter) { 2690 aconnector = to_amdgpu_dm_connector(connector); 2691 2692 /* 2693 * this is the case when traversing through already created 2694 * MST connectors, should be skipped 2695 */ 2696 if (aconnector->dc_link && 2697 aconnector->dc_link->type == dc_connection_mst_branch) 2698 continue; 2699 2700 mutex_lock(&aconnector->hpd_lock); 2701 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) 2702 DRM_ERROR("KMS: Failed to detect connector\n"); 2703 2704 if (aconnector->base.force && new_connection_type == dc_connection_none) 2705 emulated_link_detect(aconnector->dc_link); 2706 else 2707 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 2708 2709 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 2710 aconnector->fake_enable = false; 2711 2712 if (aconnector->dc_sink) 2713 dc_sink_release(aconnector->dc_sink); 2714 aconnector->dc_sink = NULL; 2715 amdgpu_dm_update_connector_after_detect(aconnector); 2716 mutex_unlock(&aconnector->hpd_lock); 2717 } 2718 drm_connector_list_iter_end(&iter); 2719 2720 /* Force mode set in atomic commit */ 2721 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) 2722 new_crtc_state->active_changed = true; 2723 2724 /* 2725 * atomic_check is expected to create the dc states. We need to release 2726 * them here, since they were duplicated as part of the suspend 2727 * procedure. 2728 */ 2729 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 2730 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 2731 if (dm_new_crtc_state->stream) { 2732 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); 2733 dc_stream_release(dm_new_crtc_state->stream); 2734 dm_new_crtc_state->stream = NULL; 2735 } 2736 } 2737 2738 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { 2739 dm_new_plane_state = to_dm_plane_state(new_plane_state); 2740 if (dm_new_plane_state->dc_state) { 2741 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); 2742 dc_plane_state_release(dm_new_plane_state->dc_state); 2743 dm_new_plane_state->dc_state = NULL; 2744 } 2745 } 2746 2747 drm_atomic_helper_resume(ddev, dm->cached_state); 2748 2749 dm->cached_state = NULL; 2750 2751 amdgpu_dm_irq_resume_late(adev); 2752 2753 amdgpu_dm_smu_write_watermarks_table(adev); 2754 2755 return 0; 2756 } 2757 2758 /** 2759 * DOC: DM Lifecycle 2760 * 2761 * DM (and consequently DC) is registered in the amdgpu base driver as a IP 2762 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to 2763 * the base driver's device list to be initialized and torn down accordingly. 2764 * 2765 * The functions to do so are provided as hooks in &struct amd_ip_funcs. 
2766 */ 2767 2768 static const struct amd_ip_funcs amdgpu_dm_funcs = { 2769 .name = "dm", 2770 .early_init = dm_early_init, 2771 .late_init = dm_late_init, 2772 .sw_init = dm_sw_init, 2773 .sw_fini = dm_sw_fini, 2774 .early_fini = amdgpu_dm_early_fini, 2775 .hw_init = dm_hw_init, 2776 .hw_fini = dm_hw_fini, 2777 .suspend = dm_suspend, 2778 .resume = dm_resume, 2779 .is_idle = dm_is_idle, 2780 .wait_for_idle = dm_wait_for_idle, 2781 .check_soft_reset = dm_check_soft_reset, 2782 .soft_reset = dm_soft_reset, 2783 .set_clockgating_state = dm_set_clockgating_state, 2784 .set_powergating_state = dm_set_powergating_state, 2785 }; 2786 2787 const struct amdgpu_ip_block_version dm_ip_block = 2788 { 2789 .type = AMD_IP_BLOCK_TYPE_DCE, 2790 .major = 1, 2791 .minor = 0, 2792 .rev = 0, 2793 .funcs = &amdgpu_dm_funcs, 2794 }; 2795 2796 2797 /** 2798 * DOC: atomic 2799 * 2800 * *WIP* 2801 */ 2802 2803 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 2804 .fb_create = amdgpu_display_user_framebuffer_create, 2805 .get_format_info = amd_get_format_info, 2806 .output_poll_changed = drm_fb_helper_output_poll_changed, 2807 .atomic_check = amdgpu_dm_atomic_check, 2808 .atomic_commit = drm_atomic_helper_commit, 2809 }; 2810 2811 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { 2812 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail 2813 }; 2814 2815 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) 2816 { 2817 u32 max_cll, min_cll, max, min, q, r; 2818 struct amdgpu_dm_backlight_caps *caps; 2819 struct amdgpu_display_manager *dm; 2820 struct drm_connector *conn_base; 2821 struct amdgpu_device *adev; 2822 struct dc_link *link = NULL; 2823 static const u8 pre_computed_values[] = { 2824 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69, 2825 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98}; 2826 int i; 2827 2828 if (!aconnector || !aconnector->dc_link) 2829 return; 2830 2831 link = aconnector->dc_link; 2832 if (link->connector_signal != SIGNAL_TYPE_EDP) 2833 return; 2834 2835 conn_base = &aconnector->base; 2836 adev = drm_to_adev(conn_base->dev); 2837 dm = &adev->dm; 2838 for (i = 0; i < dm->num_of_edps; i++) { 2839 if (link == dm->backlight_link[i]) 2840 break; 2841 } 2842 if (i >= dm->num_of_edps) 2843 return; 2844 caps = &dm->backlight_caps[i]; 2845 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; 2846 caps->aux_support = false; 2847 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll; 2848 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll; 2849 2850 if (caps->ext_caps->bits.oled == 1 /*|| 2851 caps->ext_caps->bits.sdr_aux_backlight_control == 1 || 2852 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/) 2853 caps->aux_support = true; 2854 2855 if (amdgpu_backlight == 0) 2856 caps->aux_support = false; 2857 else if (amdgpu_backlight == 1) 2858 caps->aux_support = true; 2859 2860 /* From the specification (CTA-861-G), for calculating the maximum 2861 * luminance we need to use: 2862 * Luminance = 50*2**(CV/32) 2863 * Where CV is a one-byte value. 2864 * For calculating this expression we may need float point precision; 2865 * to avoid this complexity level, we take advantage that CV is divided 2866 * by a constant. From the Euclids division algorithm, we know that CV 2867 * can be written as: CV = 32*q + r. Next, we replace CV in the 2868 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just 2869 * need to pre-compute the value of r/32. 
For pre-computing the values 2870 * We just used the following Ruby line: 2871 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round} 2872 * The results of the above expressions can be verified at 2873 * pre_computed_values. 2874 */ 2875 q = max_cll >> 5; 2876 r = max_cll % 32; 2877 max = (1 << q) * pre_computed_values[r]; 2878 2879 // min luminance: maxLum * (CV/255)^2 / 100 2880 q = DIV_ROUND_CLOSEST(min_cll, 255); 2881 min = max * DIV_ROUND_CLOSEST((q * q), 100); 2882 2883 caps->aux_max_input_signal = max; 2884 caps->aux_min_input_signal = min; 2885 } 2886 2887 void amdgpu_dm_update_connector_after_detect( 2888 struct amdgpu_dm_connector *aconnector) 2889 { 2890 struct drm_connector *connector = &aconnector->base; 2891 struct drm_device *dev = connector->dev; 2892 struct dc_sink *sink; 2893 2894 /* MST handled by drm_mst framework */ 2895 if (aconnector->mst_mgr.mst_state == true) 2896 return; 2897 2898 sink = aconnector->dc_link->local_sink; 2899 if (sink) 2900 dc_sink_retain(sink); 2901 2902 /* 2903 * Edid mgmt connector gets first update only in mode_valid hook and then 2904 * the connector sink is set to either fake or physical sink depends on link status. 2905 * Skip if already done during boot. 2906 */ 2907 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED 2908 && aconnector->dc_em_sink) { 2909 2910 /* 2911 * For S3 resume with headless use eml_sink to fake stream 2912 * because on resume connector->sink is set to NULL 2913 */ 2914 mutex_lock(&dev->mode_config.mutex); 2915 2916 if (sink) { 2917 if (aconnector->dc_sink) { 2918 amdgpu_dm_update_freesync_caps(connector, NULL); 2919 /* 2920 * retain and release below are used to 2921 * bump up refcount for sink because the link doesn't point 2922 * to it anymore after disconnect, so on next crtc to connector 2923 * reshuffle by UMD we will get into unwanted dc_sink release 2924 */ 2925 dc_sink_release(aconnector->dc_sink); 2926 } 2927 aconnector->dc_sink = sink; 2928 dc_sink_retain(aconnector->dc_sink); 2929 amdgpu_dm_update_freesync_caps(connector, 2930 aconnector->edid); 2931 } else { 2932 amdgpu_dm_update_freesync_caps(connector, NULL); 2933 if (!aconnector->dc_sink) { 2934 aconnector->dc_sink = aconnector->dc_em_sink; 2935 dc_sink_retain(aconnector->dc_sink); 2936 } 2937 } 2938 2939 mutex_unlock(&dev->mode_config.mutex); 2940 2941 if (sink) 2942 dc_sink_release(sink); 2943 return; 2944 } 2945 2946 /* 2947 * TODO: temporary guard to look for proper fix 2948 * if this sink is MST sink, we should not do anything 2949 */ 2950 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 2951 dc_sink_release(sink); 2952 return; 2953 } 2954 2955 if (aconnector->dc_sink == sink) { 2956 /* 2957 * We got a DP short pulse (Link Loss, DP CTS, etc...). 2958 * Do nothing!! 2959 */ 2960 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", 2961 aconnector->connector_id); 2962 if (sink) 2963 dc_sink_release(sink); 2964 return; 2965 } 2966 2967 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", 2968 aconnector->connector_id, aconnector->dc_sink, sink); 2969 2970 mutex_lock(&dev->mode_config.mutex); 2971 2972 /* 2973 * 1. Update status of the drm connector 2974 * 2. Send an event and let userspace tell us what to do 2975 */ 2976 if (sink) { 2977 /* 2978 * TODO: check if we still need the S3 mode update workaround. 2979 * If yes, put it here. 
2980 */ 2981 if (aconnector->dc_sink) { 2982 amdgpu_dm_update_freesync_caps(connector, NULL); 2983 dc_sink_release(aconnector->dc_sink); 2984 } 2985 2986 aconnector->dc_sink = sink; 2987 dc_sink_retain(aconnector->dc_sink); 2988 if (sink->dc_edid.length == 0) { 2989 aconnector->edid = NULL; 2990 if (aconnector->dc_link->aux_mode) { 2991 drm_dp_cec_unset_edid( 2992 &aconnector->dm_dp_aux.aux); 2993 } 2994 } else { 2995 aconnector->edid = 2996 (struct edid *)sink->dc_edid.raw_edid; 2997 2998 if (aconnector->dc_link->aux_mode) 2999 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, 3000 aconnector->edid); 3001 } 3002 3003 drm_connector_update_edid_property(connector, aconnector->edid); 3004 amdgpu_dm_update_freesync_caps(connector, aconnector->edid); 3005 update_connector_ext_caps(aconnector); 3006 } else { 3007 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 3008 amdgpu_dm_update_freesync_caps(connector, NULL); 3009 drm_connector_update_edid_property(connector, NULL); 3010 aconnector->num_modes = 0; 3011 dc_sink_release(aconnector->dc_sink); 3012 aconnector->dc_sink = NULL; 3013 aconnector->edid = NULL; 3014 #ifdef CONFIG_DRM_AMD_DC_HDCP 3015 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ 3016 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 3017 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 3018 #endif 3019 } 3020 3021 mutex_unlock(&dev->mode_config.mutex); 3022 3023 update_subconnector_property(aconnector); 3024 3025 if (sink) 3026 dc_sink_release(sink); 3027 } 3028 3029 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) 3030 { 3031 struct drm_connector *connector = &aconnector->base; 3032 struct drm_device *dev = connector->dev; 3033 enum dc_connection_type new_connection_type = dc_connection_none; 3034 struct amdgpu_device *adev = drm_to_adev(dev); 3035 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 3036 struct dm_crtc_state *dm_crtc_state = NULL; 3037 3038 if (adev->dm.disable_hpd_irq) 3039 return; 3040 3041 if (dm_con_state->base.state && dm_con_state->base.crtc) 3042 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state( 3043 dm_con_state->base.state, 3044 dm_con_state->base.crtc)); 3045 /* 3046 * In case of failure or MST no need to update connector status or notify the OS 3047 * since (for MST case) MST does this in its own context. 
3048 */ 3049 mutex_lock(&aconnector->hpd_lock); 3050 3051 #ifdef CONFIG_DRM_AMD_DC_HDCP 3052 if (adev->dm.hdcp_workqueue) { 3053 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 3054 dm_con_state->update_hdcp = true; 3055 } 3056 #endif 3057 if (aconnector->fake_enable) 3058 aconnector->fake_enable = false; 3059 3060 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) 3061 DRM_ERROR("KMS: Failed to detect connector\n"); 3062 3063 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3064 emulated_link_detect(aconnector->dc_link); 3065 3066 drm_modeset_lock_all(dev); 3067 dm_restore_drm_connector_state(dev, connector); 3068 drm_modeset_unlock_all(dev); 3069 3070 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3071 drm_kms_helper_connector_hotplug_event(connector); 3072 3073 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { 3074 if (new_connection_type == dc_connection_none && 3075 aconnector->dc_link->type == dc_connection_none && 3076 dm_crtc_state) 3077 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state); 3078 3079 amdgpu_dm_update_connector_after_detect(aconnector); 3080 3081 drm_modeset_lock_all(dev); 3082 dm_restore_drm_connector_state(dev, connector); 3083 drm_modeset_unlock_all(dev); 3084 3085 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3086 drm_kms_helper_connector_hotplug_event(connector); 3087 } 3088 mutex_unlock(&aconnector->hpd_lock); 3089 3090 } 3091 3092 static void handle_hpd_irq(void *param) 3093 { 3094 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3095 3096 handle_hpd_irq_helper(aconnector); 3097 3098 } 3099 3100 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector) 3101 { 3102 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; 3103 uint8_t dret; 3104 bool new_irq_handled = false; 3105 int dpcd_addr; 3106 int dpcd_bytes_to_read; 3107 3108 const int max_process_count = 30; 3109 int process_count = 0; 3110 3111 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); 3112 3113 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { 3114 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; 3115 /* DPCD 0x200 - 0x201 for downstream IRQ */ 3116 dpcd_addr = DP_SINK_COUNT; 3117 } else { 3118 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; 3119 /* DPCD 0x2002 - 0x2005 for downstream IRQ */ 3120 dpcd_addr = DP_SINK_COUNT_ESI; 3121 } 3122 3123 dret = drm_dp_dpcd_read( 3124 &aconnector->dm_dp_aux.aux, 3125 dpcd_addr, 3126 esi, 3127 dpcd_bytes_to_read); 3128 3129 while (dret == dpcd_bytes_to_read && 3130 process_count < max_process_count) { 3131 uint8_t retry; 3132 dret = 0; 3133 3134 process_count++; 3135 3136 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); 3137 /* handle HPD short pulse irq */ 3138 if (aconnector->mst_mgr.mst_state) 3139 drm_dp_mst_hpd_irq( 3140 &aconnector->mst_mgr, 3141 esi, 3142 &new_irq_handled); 3143 3144 if (new_irq_handled) { 3145 /* ACK at DPCD to notify down stream */ 3146 const int ack_dpcd_bytes_to_write = 3147 dpcd_bytes_to_read - 1; 3148 3149 for (retry = 0; retry < 3; retry++) { 3150 uint8_t wret; 3151 3152 wret = drm_dp_dpcd_write( 3153 &aconnector->dm_dp_aux.aux, 3154 dpcd_addr + 1, 3155 &esi[1], 3156 ack_dpcd_bytes_to_write); 3157 if (wret == ack_dpcd_bytes_to_write) 3158 break; 3159 } 3160 3161 /* check if there is new irq to be handled */ 3162 dret = drm_dp_dpcd_read( 3163 &aconnector->dm_dp_aux.aux, 3164 dpcd_addr, 3165 esi, 
3166 dpcd_bytes_to_read); 3167 3168 new_irq_handled = false; 3169 } else { 3170 break; 3171 } 3172 } 3173 3174 if (process_count == max_process_count) 3175 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); 3176 } 3177 3178 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, 3179 union hpd_irq_data hpd_irq_data) 3180 { 3181 struct hpd_rx_irq_offload_work *offload_work = 3182 kzalloc(sizeof(*offload_work), GFP_KERNEL); 3183 3184 if (!offload_work) { 3185 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); 3186 return; 3187 } 3188 3189 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); 3190 offload_work->data = hpd_irq_data; 3191 offload_work->offload_wq = offload_wq; 3192 3193 queue_work(offload_wq->wq, &offload_work->work); 3194 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); 3195 } 3196 3197 static void handle_hpd_rx_irq(void *param) 3198 { 3199 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3200 struct drm_connector *connector = &aconnector->base; 3201 struct drm_device *dev = connector->dev; 3202 struct dc_link *dc_link = aconnector->dc_link; 3203 bool is_mst_root_connector = aconnector->mst_mgr.mst_state; 3204 bool result = false; 3205 enum dc_connection_type new_connection_type = dc_connection_none; 3206 struct amdgpu_device *adev = drm_to_adev(dev); 3207 union hpd_irq_data hpd_irq_data; 3208 bool link_loss = false; 3209 bool has_left_work = false; 3210 int idx = aconnector->base.index; 3211 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; 3212 3213 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); 3214 3215 if (adev->dm.disable_hpd_irq) 3216 return; 3217 3218 /* 3219 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio 3220 * conflict, after implement i2c helper, this mutex should be 3221 * retired. 3222 */ 3223 mutex_lock(&aconnector->hpd_lock); 3224 3225 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, 3226 &link_loss, true, &has_left_work); 3227 3228 if (!has_left_work) 3229 goto out; 3230 3231 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { 3232 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3233 goto out; 3234 } 3235 3236 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { 3237 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || 3238 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { 3239 dm_handle_mst_sideband_msg(aconnector); 3240 goto out; 3241 } 3242 3243 if (link_loss) { 3244 bool skip = false; 3245 3246 spin_lock(&offload_wq->offload_lock); 3247 skip = offload_wq->is_handling_link_loss; 3248 3249 if (!skip) 3250 offload_wq->is_handling_link_loss = true; 3251 3252 spin_unlock(&offload_wq->offload_lock); 3253 3254 if (!skip) 3255 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3256 3257 goto out; 3258 } 3259 } 3260 3261 out: 3262 if (result && !is_mst_root_connector) { 3263 /* Downstream Port status changed. 
*/ 3264 if (!dc_link_detect_sink(dc_link, &new_connection_type)) 3265 DRM_ERROR("KMS: Failed to detect connector\n"); 3266 3267 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3268 emulated_link_detect(dc_link); 3269 3270 if (aconnector->fake_enable) 3271 aconnector->fake_enable = false; 3272 3273 amdgpu_dm_update_connector_after_detect(aconnector); 3274 3275 3276 drm_modeset_lock_all(dev); 3277 dm_restore_drm_connector_state(dev, connector); 3278 drm_modeset_unlock_all(dev); 3279 3280 drm_kms_helper_connector_hotplug_event(connector); 3281 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { 3282 3283 if (aconnector->fake_enable) 3284 aconnector->fake_enable = false; 3285 3286 amdgpu_dm_update_connector_after_detect(aconnector); 3287 3288 3289 drm_modeset_lock_all(dev); 3290 dm_restore_drm_connector_state(dev, connector); 3291 drm_modeset_unlock_all(dev); 3292 3293 drm_kms_helper_connector_hotplug_event(connector); 3294 } 3295 } 3296 #ifdef CONFIG_DRM_AMD_DC_HDCP 3297 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { 3298 if (adev->dm.hdcp_workqueue) 3299 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); 3300 } 3301 #endif 3302 3303 if (dc_link->type != dc_connection_mst_branch) 3304 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux); 3305 3306 mutex_unlock(&aconnector->hpd_lock); 3307 } 3308 3309 static void register_hpd_handlers(struct amdgpu_device *adev) 3310 { 3311 struct drm_device *dev = adev_to_drm(adev); 3312 struct drm_connector *connector; 3313 struct amdgpu_dm_connector *aconnector; 3314 const struct dc_link *dc_link; 3315 struct dc_interrupt_params int_params = {0}; 3316 3317 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3318 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3319 3320 list_for_each_entry(connector, 3321 &dev->mode_config.connector_list, head) { 3322 3323 aconnector = to_amdgpu_dm_connector(connector); 3324 dc_link = aconnector->dc_link; 3325 3326 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { 3327 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3328 int_params.irq_source = dc_link->irq_source_hpd; 3329 3330 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3331 handle_hpd_irq, 3332 (void *) aconnector); 3333 } 3334 3335 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { 3336 3337 /* Also register for DP short pulse (hpd_rx). */ 3338 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3339 int_params.irq_source = dc_link->irq_source_hpd_rx; 3340 3341 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3342 handle_hpd_rx_irq, 3343 (void *) aconnector); 3344 3345 if (adev->dm.hpd_rx_offload_wq) 3346 adev->dm.hpd_rx_offload_wq[connector->index].aconnector = 3347 aconnector; 3348 } 3349 } 3350 } 3351 3352 #if defined(CONFIG_DRM_AMD_DC_SI) 3353 /* Register IRQ sources and initialize IRQ callbacks */ 3354 static int dce60_register_irq_handlers(struct amdgpu_device *adev) 3355 { 3356 struct dc *dc = adev->dm.dc; 3357 struct common_irq_params *c_irq_params; 3358 struct dc_interrupt_params int_params = {0}; 3359 int r; 3360 int i; 3361 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3362 3363 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3364 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3365 3366 /* 3367 * Actions of amdgpu_irq_add_id(): 3368 * 1. Register a set() function with base driver. 3369 * Base driver will call set() function to enable/disable an 3370 * interrupt in DC hardware. 3371 * 2. Register amdgpu_dm_irq_handler(). 
3372 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3373 * coming from DC hardware. 3374 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3375 * for acknowledging and handling. */ 3376 3377 /* Use VBLANK interrupt */ 3378 for (i = 0; i < adev->mode_info.num_crtc; i++) { 3379 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq); 3380 if (r) { 3381 DRM_ERROR("Failed to add crtc irq id!\n"); 3382 return r; 3383 } 3384 3385 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3386 int_params.irq_source = 3387 dc_interrupt_to_irq_source(dc, i+1 , 0); 3388 3389 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3390 3391 c_irq_params->adev = adev; 3392 c_irq_params->irq_src = int_params.irq_source; 3393 3394 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3395 dm_crtc_high_irq, c_irq_params); 3396 } 3397 3398 /* Use GRPH_PFLIP interrupt */ 3399 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 3400 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 3401 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 3402 if (r) { 3403 DRM_ERROR("Failed to add page flip irq id!\n"); 3404 return r; 3405 } 3406 3407 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3408 int_params.irq_source = 3409 dc_interrupt_to_irq_source(dc, i, 0); 3410 3411 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3412 3413 c_irq_params->adev = adev; 3414 c_irq_params->irq_src = int_params.irq_source; 3415 3416 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3417 dm_pflip_high_irq, c_irq_params); 3418 3419 } 3420 3421 /* HPD */ 3422 r = amdgpu_irq_add_id(adev, client_id, 3423 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 3424 if (r) { 3425 DRM_ERROR("Failed to add hpd irq id!\n"); 3426 return r; 3427 } 3428 3429 register_hpd_handlers(adev); 3430 3431 return 0; 3432 } 3433 #endif 3434 3435 /* Register IRQ sources and initialize IRQ callbacks */ 3436 static int dce110_register_irq_handlers(struct amdgpu_device *adev) 3437 { 3438 struct dc *dc = adev->dm.dc; 3439 struct common_irq_params *c_irq_params; 3440 struct dc_interrupt_params int_params = {0}; 3441 int r; 3442 int i; 3443 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3444 3445 if (adev->family >= AMDGPU_FAMILY_AI) 3446 client_id = SOC15_IH_CLIENTID_DCE; 3447 3448 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3449 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3450 3451 /* 3452 * Actions of amdgpu_irq_add_id(): 3453 * 1. Register a set() function with base driver. 3454 * Base driver will call set() function to enable/disable an 3455 * interrupt in DC hardware. 3456 * 2. Register amdgpu_dm_irq_handler(). 3457 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3458 * coming from DC hardware. 3459 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3460 * for acknowledging and handling. 
*/ 3461 3462 /* Use VBLANK interrupt */ 3463 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { 3464 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); 3465 if (r) { 3466 DRM_ERROR("Failed to add crtc irq id!\n"); 3467 return r; 3468 } 3469 3470 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3471 int_params.irq_source = 3472 dc_interrupt_to_irq_source(dc, i, 0); 3473 3474 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3475 3476 c_irq_params->adev = adev; 3477 c_irq_params->irq_src = int_params.irq_source; 3478 3479 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3480 dm_crtc_high_irq, c_irq_params); 3481 } 3482 3483 /* Use VUPDATE interrupt */ 3484 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { 3485 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); 3486 if (r) { 3487 DRM_ERROR("Failed to add vupdate irq id!\n"); 3488 return r; 3489 } 3490 3491 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3492 int_params.irq_source = 3493 dc_interrupt_to_irq_source(dc, i, 0); 3494 3495 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 3496 3497 c_irq_params->adev = adev; 3498 c_irq_params->irq_src = int_params.irq_source; 3499 3500 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3501 dm_vupdate_high_irq, c_irq_params); 3502 } 3503 3504 /* Use GRPH_PFLIP interrupt */ 3505 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 3506 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 3507 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 3508 if (r) { 3509 DRM_ERROR("Failed to add page flip irq id!\n"); 3510 return r; 3511 } 3512 3513 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3514 int_params.irq_source = 3515 dc_interrupt_to_irq_source(dc, i, 0); 3516 3517 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3518 3519 c_irq_params->adev = adev; 3520 c_irq_params->irq_src = int_params.irq_source; 3521 3522 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3523 dm_pflip_high_irq, c_irq_params); 3524 3525 } 3526 3527 /* HPD */ 3528 r = amdgpu_irq_add_id(adev, client_id, 3529 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 3530 if (r) { 3531 DRM_ERROR("Failed to add hpd irq id!\n"); 3532 return r; 3533 } 3534 3535 register_hpd_handlers(adev); 3536 3537 return 0; 3538 } 3539 3540 /* Register IRQ sources and initialize IRQ callbacks */ 3541 static int dcn10_register_irq_handlers(struct amdgpu_device *adev) 3542 { 3543 struct dc *dc = adev->dm.dc; 3544 struct common_irq_params *c_irq_params; 3545 struct dc_interrupt_params int_params = {0}; 3546 int r; 3547 int i; 3548 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 3549 static const unsigned int vrtl_int_srcid[] = { 3550 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, 3551 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, 3552 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, 3553 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, 3554 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, 3555 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL 3556 }; 3557 #endif 3558 3559 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3560 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3561 3562 /* 3563 * Actions of amdgpu_irq_add_id(): 3564 * 1. Register a set() function with base driver. 
3565 * Base driver will call set() function to enable/disable an 3566 * interrupt in DC hardware. 3567 * 2. Register amdgpu_dm_irq_handler(). 3568 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3569 * coming from DC hardware. 3570 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3571 * for acknowledging and handling. 3572 */ 3573 3574 /* Use VSTARTUP interrupt */ 3575 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 3576 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 3577 i++) { 3578 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); 3579 3580 if (r) { 3581 DRM_ERROR("Failed to add crtc irq id!\n"); 3582 return r; 3583 } 3584 3585 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3586 int_params.irq_source = 3587 dc_interrupt_to_irq_source(dc, i, 0); 3588 3589 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3590 3591 c_irq_params->adev = adev; 3592 c_irq_params->irq_src = int_params.irq_source; 3593 3594 amdgpu_dm_irq_register_interrupt( 3595 adev, &int_params, dm_crtc_high_irq, c_irq_params); 3596 } 3597 3598 /* Use otg vertical line interrupt */ 3599 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 3600 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { 3601 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, 3602 vrtl_int_srcid[i], &adev->vline0_irq); 3603 3604 if (r) { 3605 DRM_ERROR("Failed to add vline0 irq id!\n"); 3606 return r; 3607 } 3608 3609 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3610 int_params.irq_source = 3611 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); 3612 3613 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) { 3614 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]); 3615 break; 3616 } 3617 3618 c_irq_params = &adev->dm.vline0_params[int_params.irq_source 3619 - DC_IRQ_SOURCE_DC1_VLINE0]; 3620 3621 c_irq_params->adev = adev; 3622 c_irq_params->irq_src = int_params.irq_source; 3623 3624 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3625 dm_dcn_vertical_interrupt0_high_irq, c_irq_params); 3626 } 3627 #endif 3628 3629 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to 3630 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx 3631 * to trigger at end of each vblank, regardless of state of the lock, 3632 * matching DCE behaviour. 
3633 */ 3634 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; 3635 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; 3636 i++) { 3637 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); 3638 3639 if (r) { 3640 DRM_ERROR("Failed to add vupdate irq id!\n"); 3641 return r; 3642 } 3643 3644 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3645 int_params.irq_source = 3646 dc_interrupt_to_irq_source(dc, i, 0); 3647 3648 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 3649 3650 c_irq_params->adev = adev; 3651 c_irq_params->irq_src = int_params.irq_source; 3652 3653 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3654 dm_vupdate_high_irq, c_irq_params); 3655 } 3656 3657 /* Use GRPH_PFLIP interrupt */ 3658 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 3659 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; 3660 i++) { 3661 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 3662 if (r) { 3663 DRM_ERROR("Failed to add page flip irq id!\n"); 3664 return r; 3665 } 3666 3667 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3668 int_params.irq_source = 3669 dc_interrupt_to_irq_source(dc, i, 0); 3670 3671 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3672 3673 c_irq_params->adev = adev; 3674 c_irq_params->irq_src = int_params.irq_source; 3675 3676 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3677 dm_pflip_high_irq, c_irq_params); 3678 3679 } 3680 3681 /* HPD */ 3682 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 3683 &adev->hpd_irq); 3684 if (r) { 3685 DRM_ERROR("Failed to add hpd irq id!\n"); 3686 return r; 3687 } 3688 3689 register_hpd_handlers(adev); 3690 3691 return 0; 3692 } 3693 /* Register Outbox IRQ sources and initialize IRQ callbacks */ 3694 static int register_outbox_irq_handlers(struct amdgpu_device *adev) 3695 { 3696 struct dc *dc = adev->dm.dc; 3697 struct common_irq_params *c_irq_params; 3698 struct dc_interrupt_params int_params = {0}; 3699 int r, i; 3700 3701 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3702 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3703 3704 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, 3705 &adev->dmub_outbox_irq); 3706 if (r) { 3707 DRM_ERROR("Failed to add outbox irq id!\n"); 3708 return r; 3709 } 3710 3711 if (dc->ctx->dmub_srv) { 3712 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT; 3713 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3714 int_params.irq_source = 3715 dc_interrupt_to_irq_source(dc, i, 0); 3716 3717 c_irq_params = &adev->dm.dmub_outbox_params[0]; 3718 3719 c_irq_params->adev = adev; 3720 c_irq_params->irq_src = int_params.irq_source; 3721 3722 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3723 dm_dmub_outbox1_low_irq, c_irq_params); 3724 } 3725 3726 return 0; 3727 } 3728 3729 /* 3730 * Acquires the lock for the atomic state object and returns 3731 * the new atomic state. 3732 * 3733 * This should only be called during atomic check. 
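 * On the first call this looks up the DM private object state with
 * drm_atomic_get_private_obj_state() and caches it in *dm_state; later calls
 * during the same atomic check simply return the cached pointer.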
3734 */ 3735 int dm_atomic_get_state(struct drm_atomic_state *state, 3736 struct dm_atomic_state **dm_state) 3737 { 3738 struct drm_device *dev = state->dev; 3739 struct amdgpu_device *adev = drm_to_adev(dev); 3740 struct amdgpu_display_manager *dm = &adev->dm; 3741 struct drm_private_state *priv_state; 3742 3743 if (*dm_state) 3744 return 0; 3745 3746 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); 3747 if (IS_ERR(priv_state)) 3748 return PTR_ERR(priv_state); 3749 3750 *dm_state = to_dm_atomic_state(priv_state); 3751 3752 return 0; 3753 } 3754 3755 static struct dm_atomic_state * 3756 dm_atomic_get_new_state(struct drm_atomic_state *state) 3757 { 3758 struct drm_device *dev = state->dev; 3759 struct amdgpu_device *adev = drm_to_adev(dev); 3760 struct amdgpu_display_manager *dm = &adev->dm; 3761 struct drm_private_obj *obj; 3762 struct drm_private_state *new_obj_state; 3763 int i; 3764 3765 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { 3766 if (obj->funcs == dm->atomic_obj.funcs) 3767 return to_dm_atomic_state(new_obj_state); 3768 } 3769 3770 return NULL; 3771 } 3772 3773 static struct drm_private_state * 3774 dm_atomic_duplicate_state(struct drm_private_obj *obj) 3775 { 3776 struct dm_atomic_state *old_state, *new_state; 3777 3778 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); 3779 if (!new_state) 3780 return NULL; 3781 3782 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); 3783 3784 old_state = to_dm_atomic_state(obj->state); 3785 3786 if (old_state && old_state->context) 3787 new_state->context = dc_copy_state(old_state->context); 3788 3789 if (!new_state->context) { 3790 kfree(new_state); 3791 return NULL; 3792 } 3793 3794 return &new_state->base; 3795 } 3796 3797 static void dm_atomic_destroy_state(struct drm_private_obj *obj, 3798 struct drm_private_state *state) 3799 { 3800 struct dm_atomic_state *dm_state = to_dm_atomic_state(state); 3801 3802 if (dm_state && dm_state->context) 3803 dc_release_state(dm_state->context); 3804 3805 kfree(dm_state); 3806 } 3807 3808 static struct drm_private_state_funcs dm_atomic_state_funcs = { 3809 .atomic_duplicate_state = dm_atomic_duplicate_state, 3810 .atomic_destroy_state = dm_atomic_destroy_state, 3811 }; 3812 3813 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) 3814 { 3815 struct dm_atomic_state *state; 3816 int r; 3817 3818 adev->mode_info.mode_config_initialized = true; 3819 3820 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; 3821 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; 3822 3823 adev_to_drm(adev)->mode_config.max_width = 16384; 3824 adev_to_drm(adev)->mode_config.max_height = 16384; 3825 3826 adev_to_drm(adev)->mode_config.preferred_depth = 24; 3827 adev_to_drm(adev)->mode_config.prefer_shadow = 1; 3828 /* indicates support for immediate flip */ 3829 adev_to_drm(adev)->mode_config.async_page_flip = true; 3830 3831 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; 3832 3833 state = kzalloc(sizeof(*state), GFP_KERNEL); 3834 if (!state) 3835 return -ENOMEM; 3836 3837 state->context = dc_create_state(adev->dm.dc); 3838 if (!state->context) { 3839 kfree(state); 3840 return -ENOMEM; 3841 } 3842 3843 dc_resource_state_copy_construct_current(adev->dm.dc, state->context); 3844 3845 drm_atomic_private_obj_init(adev_to_drm(adev), 3846 &adev->dm.atomic_obj, 3847 &state->base, 3848 &dm_atomic_state_funcs); 3849 3850 r = amdgpu_display_modeset_create_props(adev); 3851 if (r) { 3852 
dc_release_state(state->context); 3853 kfree(state); 3854 return r; 3855 } 3856 3857 r = amdgpu_dm_audio_init(adev); 3858 if (r) { 3859 dc_release_state(state->context); 3860 kfree(state); 3861 return r; 3862 } 3863 3864 return 0; 3865 } 3866 3867 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 3868 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 3869 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 3870 3871 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 3872 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 3873 3874 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, 3875 int bl_idx) 3876 { 3877 #if defined(CONFIG_ACPI) 3878 struct amdgpu_dm_backlight_caps caps; 3879 3880 memset(&caps, 0, sizeof(caps)); 3881 3882 if (dm->backlight_caps[bl_idx].caps_valid) 3883 return; 3884 3885 amdgpu_acpi_get_backlight_caps(&caps); 3886 if (caps.caps_valid) { 3887 dm->backlight_caps[bl_idx].caps_valid = true; 3888 if (caps.aux_support) 3889 return; 3890 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; 3891 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal; 3892 } else { 3893 dm->backlight_caps[bl_idx].min_input_signal = 3894 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 3895 dm->backlight_caps[bl_idx].max_input_signal = 3896 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 3897 } 3898 #else 3899 if (dm->backlight_caps[bl_idx].aux_support) 3900 return; 3901 3902 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 3903 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 3904 #endif 3905 } 3906 3907 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, 3908 unsigned *min, unsigned *max) 3909 { 3910 if (!caps) 3911 return 0; 3912 3913 if (caps->aux_support) { 3914 // Firmware limits are in nits, DC API wants millinits. 3915 *max = 1000 * caps->aux_max_input_signal; 3916 *min = 1000 * caps->aux_min_input_signal; 3917 } else { 3918 // Firmware limits are 8-bit, PWM control is 16-bit. 
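// Multiplying by 0x101 replicates the 8-bit limit into both bytes, so 0xFF
// maps to exactly 0xFFFF; with the defaults (12, 255) the resulting PWM
// range is 0x0C0C..0xFFFF.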
3919 *max = 0x101 * caps->max_input_signal; 3920 *min = 0x101 * caps->min_input_signal; 3921 } 3922 return 1; 3923 } 3924 3925 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, 3926 uint32_t brightness) 3927 { 3928 unsigned min, max; 3929 3930 if (!get_brightness_range(caps, &min, &max)) 3931 return brightness; 3932 3933 // Rescale 0..255 to min..max 3934 return min + DIV_ROUND_CLOSEST((max - min) * brightness, 3935 AMDGPU_MAX_BL_LEVEL); 3936 } 3937 3938 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, 3939 uint32_t brightness) 3940 { 3941 unsigned min, max; 3942 3943 if (!get_brightness_range(caps, &min, &max)) 3944 return brightness; 3945 3946 if (brightness < min) 3947 return 0; 3948 // Rescale min..max to 0..255 3949 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), 3950 max - min); 3951 } 3952 3953 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, 3954 int bl_idx, 3955 u32 user_brightness) 3956 { 3957 struct amdgpu_dm_backlight_caps caps; 3958 struct dc_link *link; 3959 u32 brightness; 3960 bool rc; 3961 3962 amdgpu_dm_update_backlight_caps(dm, bl_idx); 3963 caps = dm->backlight_caps[bl_idx]; 3964 3965 dm->brightness[bl_idx] = user_brightness; 3966 /* update scratch register */ 3967 if (bl_idx == 0) 3968 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); 3969 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); 3970 link = (struct dc_link *)dm->backlight_link[bl_idx]; 3971 3972 /* Change brightness based on AUX property */ 3973 if (caps.aux_support) { 3974 rc = dc_link_set_backlight_level_nits(link, true, brightness, 3975 AUX_BL_DEFAULT_TRANSITION_TIME_MS); 3976 if (!rc) 3977 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); 3978 } else { 3979 rc = dc_link_set_backlight_level(link, brightness, 0); 3980 if (!rc) 3981 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); 3982 } 3983 3984 if (rc) 3985 dm->actual_brightness[bl_idx] = user_brightness; 3986 } 3987 3988 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) 3989 { 3990 struct amdgpu_display_manager *dm = bl_get_data(bd); 3991 int i; 3992 3993 for (i = 0; i < dm->num_of_edps; i++) { 3994 if (bd == dm->backlight_dev[i]) 3995 break; 3996 } 3997 if (i >= AMDGPU_DM_MAX_NUM_EDP) 3998 i = 0; 3999 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness); 4000 4001 return 0; 4002 } 4003 4004 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, 4005 int bl_idx) 4006 { 4007 struct amdgpu_dm_backlight_caps caps; 4008 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; 4009 4010 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4011 caps = dm->backlight_caps[bl_idx]; 4012 4013 if (caps.aux_support) { 4014 u32 avg, peak; 4015 bool rc; 4016 4017 rc = dc_link_get_backlight_level_nits(link, &avg, &peak); 4018 if (!rc) 4019 return dm->brightness[bl_idx]; 4020 return convert_brightness_to_user(&caps, avg); 4021 } else { 4022 int ret = dc_link_get_backlight_level(link); 4023 4024 if (ret == DC_ERROR_UNEXPECTED) 4025 return dm->brightness[bl_idx]; 4026 return convert_brightness_to_user(&caps, ret); 4027 } 4028 } 4029 4030 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) 4031 { 4032 struct amdgpu_display_manager *dm = bl_get_data(bd); 4033 int i; 4034 4035 for (i = 0; i < dm->num_of_edps; i++) { 4036 if (bd == dm->backlight_dev[i]) 4037 break; 4038 } 4039 if (i >= 
AMDGPU_DM_MAX_NUM_EDP) 4040 i = 0; 4041 return amdgpu_dm_backlight_get_level(dm, i); 4042 } 4043 4044 static const struct backlight_ops amdgpu_dm_backlight_ops = { 4045 .options = BL_CORE_SUSPENDRESUME, 4046 .get_brightness = amdgpu_dm_backlight_get_brightness, 4047 .update_status = amdgpu_dm_backlight_update_status, 4048 }; 4049 4050 static void 4051 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) 4052 { 4053 char bl_name[16]; 4054 struct backlight_properties props = { 0 }; 4055 4056 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps); 4057 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL; 4058 4059 props.max_brightness = AMDGPU_MAX_BL_LEVEL; 4060 props.brightness = AMDGPU_MAX_BL_LEVEL; 4061 props.type = BACKLIGHT_RAW; 4062 4063 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", 4064 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps); 4065 4066 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name, 4067 adev_to_drm(dm->adev)->dev, 4068 dm, 4069 &amdgpu_dm_backlight_ops, 4070 &props); 4071 4072 if (IS_ERR(dm->backlight_dev[dm->num_of_edps])) 4073 DRM_ERROR("DM: Backlight registration failed!\n"); 4074 else 4075 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); 4076 } 4077 #endif 4078 4079 static int initialize_plane(struct amdgpu_display_manager *dm, 4080 struct amdgpu_mode_info *mode_info, int plane_id, 4081 enum drm_plane_type plane_type, 4082 const struct dc_plane_cap *plane_cap) 4083 { 4084 struct drm_plane *plane; 4085 unsigned long possible_crtcs; 4086 int ret = 0; 4087 4088 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); 4089 if (!plane) { 4090 DRM_ERROR("KMS: Failed to allocate plane\n"); 4091 return -ENOMEM; 4092 } 4093 plane->type = plane_type; 4094 4095 /* 4096 * HACK: IGT tests expect that the primary plane for a CRTC 4097 * can only have one possible CRTC. Only expose support for 4098 * any CRTC if they're not going to be used as a primary plane 4099 * for a CRTC - like overlay or underlay planes. 4100 */ 4101 possible_crtcs = 1 << plane_id; 4102 if (plane_id >= dm->dc->caps.max_streams) 4103 possible_crtcs = 0xff; 4104 4105 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap); 4106 4107 if (ret) { 4108 DRM_ERROR("KMS: Failed to initialize plane\n"); 4109 kfree(plane); 4110 return ret; 4111 } 4112 4113 if (mode_info) 4114 mode_info->planes[plane_id] = plane; 4115 4116 return ret; 4117 } 4118 4119 4120 static void register_backlight_device(struct amdgpu_display_manager *dm, 4121 struct dc_link *link) 4122 { 4123 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 4124 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 4125 4126 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && 4127 link->type != dc_connection_none) { 4128 /* 4129 * Even if registration fails, we should continue with 4130 * DM initialization because not having a backlight control 4131 * is better than a black screen. 4132 */ 4133 if (!dm->backlight_dev[dm->num_of_edps]) 4134 amdgpu_dm_register_backlight_device(dm); 4135 4136 if (dm->backlight_dev[dm->num_of_edps]) { 4137 dm->backlight_link[dm->num_of_edps] = link; 4138 dm->num_of_edps++; 4139 } 4140 } 4141 #endif 4142 } 4143 4144 4145 /* 4146 * In this architecture, the association 4147 * connector -> encoder -> crtc 4148 * is not really required.
The crtc and connector will hold the 4149 * display_index as an abstraction to use with DAL component 4150 * 4151 * Returns 0 on success 4152 */ 4153 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) 4154 { 4155 struct amdgpu_display_manager *dm = &adev->dm; 4156 int32_t i; 4157 struct amdgpu_dm_connector *aconnector = NULL; 4158 struct amdgpu_encoder *aencoder = NULL; 4159 struct amdgpu_mode_info *mode_info = &adev->mode_info; 4160 uint32_t link_cnt; 4161 int32_t primary_planes; 4162 enum dc_connection_type new_connection_type = dc_connection_none; 4163 const struct dc_plane_cap *plane; 4164 bool psr_feature_enabled = false; 4165 4166 dm->display_indexes_num = dm->dc->caps.max_streams; 4167 /* Update the actual used number of crtc */ 4168 adev->mode_info.num_crtc = adev->dm.display_indexes_num; 4169 4170 link_cnt = dm->dc->caps.max_links; 4171 if (amdgpu_dm_mode_config_init(dm->adev)) { 4172 DRM_ERROR("DM: Failed to initialize mode config\n"); 4173 return -EINVAL; 4174 } 4175 4176 /* There is one primary plane per CRTC */ 4177 primary_planes = dm->dc->caps.max_streams; 4178 ASSERT(primary_planes <= AMDGPU_MAX_PLANES); 4179 4180 /* 4181 * Initialize primary planes, implicit planes for legacy IOCTLS. 4182 * Order is reversed to match iteration order in atomic check. 4183 */ 4184 for (i = (primary_planes - 1); i >= 0; i--) { 4185 plane = &dm->dc->caps.planes[i]; 4186 4187 if (initialize_plane(dm, mode_info, i, 4188 DRM_PLANE_TYPE_PRIMARY, plane)) { 4189 DRM_ERROR("KMS: Failed to initialize primary plane\n"); 4190 goto fail; 4191 } 4192 } 4193 4194 /* 4195 * Initialize overlay planes, index starting after primary planes. 4196 * These planes have a higher DRM index than the primary planes since 4197 * they should be considered as having a higher z-order. 4198 * Order is reversed to match iteration order in atomic check. 4199 * 4200 * Only support DCN for now, and only expose one so we don't encourage 4201 * userspace to use up all the pipes. 4202 */ 4203 for (i = 0; i < dm->dc->caps.max_planes; ++i) { 4204 struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; 4205 4206 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) 4207 continue; 4208 4209 if (!plane->blends_with_above || !plane->blends_with_below) 4210 continue; 4211 4212 if (!plane->pixel_format_support.argb8888) 4213 continue; 4214 4215 if (initialize_plane(dm, NULL, primary_planes + i, 4216 DRM_PLANE_TYPE_OVERLAY, plane)) { 4217 DRM_ERROR("KMS: Failed to initialize overlay plane\n"); 4218 goto fail; 4219 } 4220 4221 /* Only create one overlay plane. */ 4222 break; 4223 } 4224 4225 for (i = 0; i < dm->dc->caps.max_streams; i++) 4226 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { 4227 DRM_ERROR("KMS: Failed to initialize crtc\n"); 4228 goto fail; 4229 } 4230 4231 /* Use Outbox interrupt */ 4232 switch (adev->ip_versions[DCE_HWIP][0]) { 4233 case IP_VERSION(3, 0, 0): 4234 case IP_VERSION(3, 1, 2): 4235 case IP_VERSION(3, 1, 3): 4236 case IP_VERSION(3, 1, 5): 4237 case IP_VERSION(3, 1, 6): 4238 case IP_VERSION(2, 1, 0): 4239 if (register_outbox_irq_handlers(dm->adev)) { 4240 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4241 goto fail; 4242 } 4243 break; 4244 default: 4245 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", 4246 adev->ip_versions[DCE_HWIP][0]); 4247 } 4248 4249 /* Determine whether to enable PSR support by default. 
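 * PSR defaults to enabled on DCN 3.1.x; other ASICs follow the DC_PSR_MASK
 * bit in amdgpu_dc_feature_mask, and DC_DISABLE_PSR in amdgpu_dc_debug_mask
 * disables the feature entirely.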
*/ 4250 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { 4251 switch (adev->ip_versions[DCE_HWIP][0]) { 4252 case IP_VERSION(3, 1, 2): 4253 case IP_VERSION(3, 1, 3): 4254 case IP_VERSION(3, 1, 5): 4255 case IP_VERSION(3, 1, 6): 4256 psr_feature_enabled = true; 4257 break; 4258 default: 4259 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK; 4260 break; 4261 } 4262 } 4263 4264 /* Disable vblank IRQs aggressively for power-saving. */ 4265 adev_to_drm(adev)->vblank_disable_immediate = true; 4266 4267 /* loops over all connectors on the board */ 4268 for (i = 0; i < link_cnt; i++) { 4269 struct dc_link *link = NULL; 4270 4271 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { 4272 DRM_ERROR( 4273 "KMS: Cannot support more than %d display indexes\n", 4274 AMDGPU_DM_MAX_DISPLAY_INDEX); 4275 continue; 4276 } 4277 4278 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 4279 if (!aconnector) 4280 goto fail; 4281 4282 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); 4283 if (!aencoder) 4284 goto fail; 4285 4286 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { 4287 DRM_ERROR("KMS: Failed to initialize encoder\n"); 4288 goto fail; 4289 } 4290 4291 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { 4292 DRM_ERROR("KMS: Failed to initialize connector\n"); 4293 goto fail; 4294 } 4295 4296 link = dc_get_link_at_index(dm->dc, i); 4297 4298 if (!dc_link_detect_sink(link, &new_connection_type)) 4299 DRM_ERROR("KMS: Failed to detect connector\n"); 4300 4301 if (aconnector->base.force && new_connection_type == dc_connection_none) { 4302 emulated_link_detect(link); 4303 amdgpu_dm_update_connector_after_detect(aconnector); 4304 4305 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { 4306 amdgpu_dm_update_connector_after_detect(aconnector); 4307 register_backlight_device(dm, link); 4308 if (dm->num_of_edps) 4309 update_connector_ext_caps(aconnector); 4310 if (psr_feature_enabled) 4311 amdgpu_dm_set_psr_caps(link); 4312 4313 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when 4314 * PSR is also supported. 4315 */ 4316 if (link->psr_settings.psr_feature_enabled) 4317 adev_to_drm(adev)->vblank_disable_immediate = false; 4318 } 4319 4320 4321 } 4322 4323 /* Software is initialized. Now we can register interrupt handlers. 
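 * Pre-DCN ASICs are matched on asic_type below; DCN parts fall through to
 * the DCE_HWIP IP-version switch in the default case.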
*/ 4324 switch (adev->asic_type) { 4325 #if defined(CONFIG_DRM_AMD_DC_SI) 4326 case CHIP_TAHITI: 4327 case CHIP_PITCAIRN: 4328 case CHIP_VERDE: 4329 case CHIP_OLAND: 4330 if (dce60_register_irq_handlers(dm->adev)) { 4331 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4332 goto fail; 4333 } 4334 break; 4335 #endif 4336 case CHIP_BONAIRE: 4337 case CHIP_HAWAII: 4338 case CHIP_KAVERI: 4339 case CHIP_KABINI: 4340 case CHIP_MULLINS: 4341 case CHIP_TONGA: 4342 case CHIP_FIJI: 4343 case CHIP_CARRIZO: 4344 case CHIP_STONEY: 4345 case CHIP_POLARIS11: 4346 case CHIP_POLARIS10: 4347 case CHIP_POLARIS12: 4348 case CHIP_VEGAM: 4349 case CHIP_VEGA10: 4350 case CHIP_VEGA12: 4351 case CHIP_VEGA20: 4352 if (dce110_register_irq_handlers(dm->adev)) { 4353 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4354 goto fail; 4355 } 4356 break; 4357 default: 4358 switch (adev->ip_versions[DCE_HWIP][0]) { 4359 case IP_VERSION(1, 0, 0): 4360 case IP_VERSION(1, 0, 1): 4361 case IP_VERSION(2, 0, 2): 4362 case IP_VERSION(2, 0, 3): 4363 case IP_VERSION(2, 0, 0): 4364 case IP_VERSION(2, 1, 0): 4365 case IP_VERSION(3, 0, 0): 4366 case IP_VERSION(3, 0, 2): 4367 case IP_VERSION(3, 0, 3): 4368 case IP_VERSION(3, 0, 1): 4369 case IP_VERSION(3, 1, 2): 4370 case IP_VERSION(3, 1, 3): 4371 case IP_VERSION(3, 1, 5): 4372 case IP_VERSION(3, 1, 6): 4373 if (dcn10_register_irq_handlers(dm->adev)) { 4374 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4375 goto fail; 4376 } 4377 break; 4378 default: 4379 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", 4380 adev->ip_versions[DCE_HWIP][0]); 4381 goto fail; 4382 } 4383 break; 4384 } 4385 4386 return 0; 4387 fail: 4388 kfree(aencoder); 4389 kfree(aconnector); 4390 4391 return -EINVAL; 4392 } 4393 4394 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) 4395 { 4396 drm_atomic_private_obj_fini(&dm->atomic_obj); 4397 return; 4398 } 4399 4400 /****************************************************************************** 4401 * amdgpu_display_funcs functions 4402 *****************************************************************************/ 4403 4404 /* 4405 * dm_bandwidth_update - program display watermarks 4406 * 4407 * @adev: amdgpu_device pointer 4408 * 4409 * Calculate and program the display watermarks and line buffer allocation. 4410 */ 4411 static void dm_bandwidth_update(struct amdgpu_device *adev) 4412 { 4413 /* TODO: implement later */ 4414 } 4415 4416 static const struct amdgpu_display_funcs dm_display_funcs = { 4417 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ 4418 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ 4419 .backlight_set_level = NULL, /* never called for DC */ 4420 .backlight_get_level = NULL, /* never called for DC */ 4421 .hpd_sense = NULL,/* called unconditionally */ 4422 .hpd_set_polarity = NULL, /* called unconditionally */ 4423 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ 4424 .page_flip_get_scanoutpos = 4425 dm_crtc_get_scanoutpos,/* called unconditionally */ 4426 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ 4427 .add_connector = NULL, /* VBIOS parsing. DAL does it. 
*/ 4428 }; 4429 4430 #if defined(CONFIG_DEBUG_KERNEL_DC) 4431 4432 static ssize_t s3_debug_store(struct device *device, 4433 struct device_attribute *attr, 4434 const char *buf, 4435 size_t count) 4436 { 4437 int ret; 4438 int s3_state; 4439 struct drm_device *drm_dev = dev_get_drvdata(device); 4440 struct amdgpu_device *adev = drm_to_adev(drm_dev); 4441 4442 ret = kstrtoint(buf, 0, &s3_state); 4443 4444 if (ret == 0) { 4445 if (s3_state) { 4446 dm_resume(adev); 4447 drm_kms_helper_hotplug_event(adev_to_drm(adev)); 4448 } else 4449 dm_suspend(adev); 4450 } 4451 4452 return ret == 0 ? count : 0; 4453 } 4454 4455 DEVICE_ATTR_WO(s3_debug); 4456 4457 #endif 4458 4459 static int dm_early_init(void *handle) 4460 { 4461 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4462 4463 switch (adev->asic_type) { 4464 #if defined(CONFIG_DRM_AMD_DC_SI) 4465 case CHIP_TAHITI: 4466 case CHIP_PITCAIRN: 4467 case CHIP_VERDE: 4468 adev->mode_info.num_crtc = 6; 4469 adev->mode_info.num_hpd = 6; 4470 adev->mode_info.num_dig = 6; 4471 break; 4472 case CHIP_OLAND: 4473 adev->mode_info.num_crtc = 2; 4474 adev->mode_info.num_hpd = 2; 4475 adev->mode_info.num_dig = 2; 4476 break; 4477 #endif 4478 case CHIP_BONAIRE: 4479 case CHIP_HAWAII: 4480 adev->mode_info.num_crtc = 6; 4481 adev->mode_info.num_hpd = 6; 4482 adev->mode_info.num_dig = 6; 4483 break; 4484 case CHIP_KAVERI: 4485 adev->mode_info.num_crtc = 4; 4486 adev->mode_info.num_hpd = 6; 4487 adev->mode_info.num_dig = 7; 4488 break; 4489 case CHIP_KABINI: 4490 case CHIP_MULLINS: 4491 adev->mode_info.num_crtc = 2; 4492 adev->mode_info.num_hpd = 6; 4493 adev->mode_info.num_dig = 6; 4494 break; 4495 case CHIP_FIJI: 4496 case CHIP_TONGA: 4497 adev->mode_info.num_crtc = 6; 4498 adev->mode_info.num_hpd = 6; 4499 adev->mode_info.num_dig = 7; 4500 break; 4501 case CHIP_CARRIZO: 4502 adev->mode_info.num_crtc = 3; 4503 adev->mode_info.num_hpd = 6; 4504 adev->mode_info.num_dig = 9; 4505 break; 4506 case CHIP_STONEY: 4507 adev->mode_info.num_crtc = 2; 4508 adev->mode_info.num_hpd = 6; 4509 adev->mode_info.num_dig = 9; 4510 break; 4511 case CHIP_POLARIS11: 4512 case CHIP_POLARIS12: 4513 adev->mode_info.num_crtc = 5; 4514 adev->mode_info.num_hpd = 5; 4515 adev->mode_info.num_dig = 5; 4516 break; 4517 case CHIP_POLARIS10: 4518 case CHIP_VEGAM: 4519 adev->mode_info.num_crtc = 6; 4520 adev->mode_info.num_hpd = 6; 4521 adev->mode_info.num_dig = 6; 4522 break; 4523 case CHIP_VEGA10: 4524 case CHIP_VEGA12: 4525 case CHIP_VEGA20: 4526 adev->mode_info.num_crtc = 6; 4527 adev->mode_info.num_hpd = 6; 4528 adev->mode_info.num_dig = 6; 4529 break; 4530 default: 4531 4532 switch (adev->ip_versions[DCE_HWIP][0]) { 4533 case IP_VERSION(2, 0, 2): 4534 case IP_VERSION(3, 0, 0): 4535 adev->mode_info.num_crtc = 6; 4536 adev->mode_info.num_hpd = 6; 4537 adev->mode_info.num_dig = 6; 4538 break; 4539 case IP_VERSION(2, 0, 0): 4540 case IP_VERSION(3, 0, 2): 4541 adev->mode_info.num_crtc = 5; 4542 adev->mode_info.num_hpd = 5; 4543 adev->mode_info.num_dig = 5; 4544 break; 4545 case IP_VERSION(2, 0, 3): 4546 case IP_VERSION(3, 0, 3): 4547 adev->mode_info.num_crtc = 2; 4548 adev->mode_info.num_hpd = 2; 4549 adev->mode_info.num_dig = 2; 4550 break; 4551 case IP_VERSION(1, 0, 0): 4552 case IP_VERSION(1, 0, 1): 4553 case IP_VERSION(3, 0, 1): 4554 case IP_VERSION(2, 1, 0): 4555 case IP_VERSION(3, 1, 2): 4556 case IP_VERSION(3, 1, 3): 4557 case IP_VERSION(3, 1, 5): 4558 case IP_VERSION(3, 1, 6): 4559 adev->mode_info.num_crtc = 4; 4560 adev->mode_info.num_hpd = 4; 4561 adev->mode_info.num_dig = 4; 
4562 break; 4563 default: 4564 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", 4565 adev->ip_versions[DCE_HWIP][0]); 4566 return -EINVAL; 4567 } 4568 break; 4569 } 4570 4571 amdgpu_dm_set_irq_funcs(adev); 4572 4573 if (adev->mode_info.funcs == NULL) 4574 adev->mode_info.funcs = &dm_display_funcs; 4575 4576 /* 4577 * Note: Do NOT change adev->audio_endpt_rreg and 4578 * adev->audio_endpt_wreg because they are initialised in 4579 * amdgpu_device_init() 4580 */ 4581 #if defined(CONFIG_DEBUG_KERNEL_DC) 4582 device_create_file( 4583 adev_to_drm(adev)->dev, 4584 &dev_attr_s3_debug); 4585 #endif 4586 4587 return 0; 4588 } 4589 4590 static bool modeset_required(struct drm_crtc_state *crtc_state, 4591 struct dc_stream_state *new_stream, 4592 struct dc_stream_state *old_stream) 4593 { 4594 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 4595 } 4596 4597 static bool modereset_required(struct drm_crtc_state *crtc_state) 4598 { 4599 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 4600 } 4601 4602 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 4603 { 4604 drm_encoder_cleanup(encoder); 4605 kfree(encoder); 4606 } 4607 4608 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 4609 .destroy = amdgpu_dm_encoder_destroy, 4610 }; 4611 4612 4613 static void get_min_max_dc_plane_scaling(struct drm_device *dev, 4614 struct drm_framebuffer *fb, 4615 int *min_downscale, int *max_upscale) 4616 { 4617 struct amdgpu_device *adev = drm_to_adev(dev); 4618 struct dc *dc = adev->dm.dc; 4619 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */ 4620 struct dc_plane_cap *plane_cap = &dc->caps.planes[0]; 4621 4622 switch (fb->format->format) { 4623 case DRM_FORMAT_P010: 4624 case DRM_FORMAT_NV12: 4625 case DRM_FORMAT_NV21: 4626 *max_upscale = plane_cap->max_upscale_factor.nv12; 4627 *min_downscale = plane_cap->max_downscale_factor.nv12; 4628 break; 4629 4630 case DRM_FORMAT_XRGB16161616F: 4631 case DRM_FORMAT_ARGB16161616F: 4632 case DRM_FORMAT_XBGR16161616F: 4633 case DRM_FORMAT_ABGR16161616F: 4634 *max_upscale = plane_cap->max_upscale_factor.fp16; 4635 *min_downscale = plane_cap->max_downscale_factor.fp16; 4636 break; 4637 4638 default: 4639 *max_upscale = plane_cap->max_upscale_factor.argb8888; 4640 *min_downscale = plane_cap->max_downscale_factor.argb8888; 4641 break; 4642 } 4643 4644 /* 4645 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a 4646 * scaling factor of 1.0 == 1000 units. 4647 */ 4648 if (*max_upscale == 1) 4649 *max_upscale = 1000; 4650 4651 if (*min_downscale == 1) 4652 *min_downscale = 1000; 4653 } 4654 4655 4656 static int fill_dc_scaling_info(struct amdgpu_device *adev, 4657 const struct drm_plane_state *state, 4658 struct dc_scaling_info *scaling_info) 4659 { 4660 int scale_w, scale_h, min_downscale, max_upscale; 4661 4662 memset(scaling_info, 0, sizeof(*scaling_info)); 4663 4664 /* Source is fixed 16.16 but we ignore mantissa for now... */ 4665 scaling_info->src_rect.x = state->src_x >> 16; 4666 scaling_info->src_rect.y = state->src_y >> 16; 4667 4668 /* 4669 * For reasons we don't (yet) fully understand a non-zero 4670 * src_y coordinate into an NV12 buffer can cause a 4671 * system hang on DCN1x. 4672 * To avoid hangs (and maybe be overly cautious) 4673 * let's reject both non-zero src_x and src_y. 
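 * (The check below only rejects this on DCN 1.0 / 1.0.1 with NV12 buffers.)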
4674 * 4675 * We currently know of only one use-case to reproduce a 4676 * scenario with non-zero src_x and src_y for NV12, which 4677 * is to gesture the YouTube Android app into full screen 4678 * on ChromeOS. 4679 */ 4680 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || 4681 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && 4682 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && 4683 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) 4684 return -EINVAL; 4685 4686 scaling_info->src_rect.width = state->src_w >> 16; 4687 if (scaling_info->src_rect.width == 0) 4688 return -EINVAL; 4689 4690 scaling_info->src_rect.height = state->src_h >> 16; 4691 if (scaling_info->src_rect.height == 0) 4692 return -EINVAL; 4693 4694 scaling_info->dst_rect.x = state->crtc_x; 4695 scaling_info->dst_rect.y = state->crtc_y; 4696 4697 if (state->crtc_w == 0) 4698 return -EINVAL; 4699 4700 scaling_info->dst_rect.width = state->crtc_w; 4701 4702 if (state->crtc_h == 0) 4703 return -EINVAL; 4704 4705 scaling_info->dst_rect.height = state->crtc_h; 4706 4707 /* DRM doesn't specify clipping on destination output. */ 4708 scaling_info->clip_rect = scaling_info->dst_rect; 4709 4710 /* Validate scaling per-format with DC plane caps */ 4711 if (state->plane && state->plane->dev && state->fb) { 4712 get_min_max_dc_plane_scaling(state->plane->dev, state->fb, 4713 &min_downscale, &max_upscale); 4714 } else { 4715 min_downscale = 250; 4716 max_upscale = 16000; 4717 } 4718 4719 scale_w = scaling_info->dst_rect.width * 1000 / 4720 scaling_info->src_rect.width; 4721 4722 if (scale_w < min_downscale || scale_w > max_upscale) 4723 return -EINVAL; 4724 4725 scale_h = scaling_info->dst_rect.height * 1000 / 4726 scaling_info->src_rect.height; 4727 4728 if (scale_h < min_downscale || scale_h > max_upscale) 4729 return -EINVAL; 4730 4731 /* 4732 * The "scaling_quality" can be ignored for now, quality = 0 has DC 4733 * assume reasonable defaults based on the format. 
4734 */ 4735 4736 return 0; 4737 } 4738 4739 static void 4740 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, 4741 uint64_t tiling_flags) 4742 { 4743 /* Fill GFX8 params */ 4744 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { 4745 unsigned int bankw, bankh, mtaspect, tile_split, num_banks; 4746 4747 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 4748 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 4749 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 4750 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 4751 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 4752 4753 /* XXX fix me for VI */ 4754 tiling_info->gfx8.num_banks = num_banks; 4755 tiling_info->gfx8.array_mode = 4756 DC_ARRAY_2D_TILED_THIN1; 4757 tiling_info->gfx8.tile_split = tile_split; 4758 tiling_info->gfx8.bank_width = bankw; 4759 tiling_info->gfx8.bank_height = bankh; 4760 tiling_info->gfx8.tile_aspect = mtaspect; 4761 tiling_info->gfx8.tile_mode = 4762 DC_ADDR_SURF_MICRO_TILING_DISPLAY; 4763 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) 4764 == DC_ARRAY_1D_TILED_THIN1) { 4765 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; 4766 } 4767 4768 tiling_info->gfx8.pipe_config = 4769 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 4770 } 4771 4772 static void 4773 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, 4774 union dc_tiling_info *tiling_info) 4775 { 4776 tiling_info->gfx9.num_pipes = 4777 adev->gfx.config.gb_addr_config_fields.num_pipes; 4778 tiling_info->gfx9.num_banks = 4779 adev->gfx.config.gb_addr_config_fields.num_banks; 4780 tiling_info->gfx9.pipe_interleave = 4781 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; 4782 tiling_info->gfx9.num_shader_engines = 4783 adev->gfx.config.gb_addr_config_fields.num_se; 4784 tiling_info->gfx9.max_compressed_frags = 4785 adev->gfx.config.gb_addr_config_fields.max_compress_frags; 4786 tiling_info->gfx9.num_rb_per_se = 4787 adev->gfx.config.gb_addr_config_fields.num_rb_per_se; 4788 tiling_info->gfx9.shaderEnable = 1; 4789 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) 4790 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; 4791 } 4792 4793 static int 4794 validate_dcc(struct amdgpu_device *adev, 4795 const enum surface_pixel_format format, 4796 const enum dc_rotation_angle rotation, 4797 const union dc_tiling_info *tiling_info, 4798 const struct dc_plane_dcc_param *dcc, 4799 const struct dc_plane_address *address, 4800 const struct plane_size *plane_size) 4801 { 4802 struct dc *dc = adev->dm.dc; 4803 struct dc_dcc_surface_param input; 4804 struct dc_surface_dcc_cap output; 4805 4806 memset(&input, 0, sizeof(input)); 4807 memset(&output, 0, sizeof(output)); 4808 4809 if (!dcc->enable) 4810 return 0; 4811 4812 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || 4813 !dc->cap_funcs.get_dcc_compression_cap) 4814 return -EINVAL; 4815 4816 input.format = format; 4817 input.surface_size.width = plane_size->surface_size.width; 4818 input.surface_size.height = plane_size->surface_size.height; 4819 input.swizzle_mode = tiling_info->gfx9.swizzle; 4820 4821 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) 4822 input.scan = SCAN_DIRECTION_HORIZONTAL; 4823 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) 4824 input.scan = SCAN_DIRECTION_VERTICAL; 4825 4826 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) 4827 return -EINVAL; 4828 4829 if (!output.capable) 4830 return 
-EINVAL; 4831 4832 if (dcc->independent_64b_blks == 0 && 4833 output.grph.rgb.independent_64b_blks != 0) 4834 return -EINVAL; 4835 4836 return 0; 4837 } 4838 4839 static bool 4840 modifier_has_dcc(uint64_t modifier) 4841 { 4842 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); 4843 } 4844 4845 static unsigned 4846 modifier_gfx9_swizzle_mode(uint64_t modifier) 4847 { 4848 if (modifier == DRM_FORMAT_MOD_LINEAR) 4849 return 0; 4850 4851 return AMD_FMT_MOD_GET(TILE, modifier); 4852 } 4853 4854 static const struct drm_format_info * 4855 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 4856 { 4857 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); 4858 } 4859 4860 static void 4861 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, 4862 union dc_tiling_info *tiling_info, 4863 uint64_t modifier) 4864 { 4865 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); 4866 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); 4867 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier); 4868 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits); 4869 4870 fill_gfx9_tiling_info_from_device(adev, tiling_info); 4871 4872 if (!IS_AMD_FMT_MOD(modifier)) 4873 return; 4874 4875 tiling_info->gfx9.num_pipes = 1u << pipes_log2; 4876 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2); 4877 4878 if (adev->family >= AMDGPU_FAMILY_NV) { 4879 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2; 4880 } else { 4881 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits; 4882 4883 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */ 4884 } 4885 } 4886 4887 enum dm_micro_swizzle { 4888 MICRO_SWIZZLE_Z = 0, 4889 MICRO_SWIZZLE_S = 1, 4890 MICRO_SWIZZLE_D = 2, 4891 MICRO_SWIZZLE_R = 3 4892 }; 4893 4894 static bool dm_plane_format_mod_supported(struct drm_plane *plane, 4895 uint32_t format, 4896 uint64_t modifier) 4897 { 4898 struct amdgpu_device *adev = drm_to_adev(plane->dev); 4899 const struct drm_format_info *info = drm_format_info(format); 4900 int i; 4901 4902 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; 4903 4904 if (!info) 4905 return false; 4906 4907 /* 4908 * We always have to allow these modifiers: 4909 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers. 4910 * 2. Not passing any modifiers is the same as explicitly passing INVALID. 4911 */ 4912 if (modifier == DRM_FORMAT_MOD_LINEAR || 4913 modifier == DRM_FORMAT_MOD_INVALID) { 4914 return true; 4915 } 4916 4917 /* Check that the modifier is on the list of the plane's supported modifiers. */ 4918 for (i = 0; i < plane->modifier_count; i++) { 4919 if (modifier == plane->modifiers[i]) 4920 break; 4921 } 4922 if (i == plane->modifier_count) 4923 return false; 4924 4925 /* 4926 * For D swizzle the canonical modifier depends on the bpp, so check 4927 * it here. 4928 */ 4929 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && 4930 adev->family >= AMDGPU_FAMILY_NV) { 4931 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) 4932 return false; 4933 } 4934 4935 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && 4936 info->cpp[0] < 8) 4937 return false; 4938 4939 if (modifier_has_dcc(modifier)) { 4940 /* Per radeonsi comments 16/64 bpp are more complicated. */ 4941 if (info->cpp[0] != 4) 4942 return false; 4943 /* We support multi-planar formats, but not when combined with 4944 * additional DCC metadata planes. 
*/ 4945 if (info->num_planes > 1) 4946 return false; 4947 } 4948 4949 return true; 4950 } 4951 4952 static void 4953 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) 4954 { 4955 if (!*mods) 4956 return; 4957 4958 if (*cap - *size < 1) { 4959 uint64_t new_cap = *cap * 2; 4960 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); 4961 4962 if (!new_mods) { 4963 kfree(*mods); 4964 *mods = NULL; 4965 return; 4966 } 4967 4968 memcpy(new_mods, *mods, sizeof(uint64_t) * *size); 4969 kfree(*mods); 4970 *mods = new_mods; 4971 *cap = new_cap; 4972 } 4973 4974 (*mods)[*size] = mod; 4975 *size += 1; 4976 } 4977 4978 static void 4979 add_gfx9_modifiers(const struct amdgpu_device *adev, 4980 uint64_t **mods, uint64_t *size, uint64_t *capacity) 4981 { 4982 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 4983 int pipe_xor_bits = min(8, pipes + 4984 ilog2(adev->gfx.config.gb_addr_config_fields.num_se)); 4985 int bank_xor_bits = min(8 - pipe_xor_bits, 4986 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks)); 4987 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) + 4988 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se); 4989 4990 4991 if (adev->family == AMDGPU_FAMILY_RV) { 4992 /* Raven2 and later */ 4993 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81; 4994 4995 /* 4996 * No _D DCC swizzles yet because we only allow 32bpp, which 4997 * doesn't support _D on DCN 4998 */ 4999 5000 if (has_constant_encode) { 5001 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5002 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5003 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 5004 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5005 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 5006 AMD_FMT_MOD_SET(DCC, 1) | 5007 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5008 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 5009 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); 5010 } 5011 5012 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5013 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5014 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 5015 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5016 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 5017 AMD_FMT_MOD_SET(DCC, 1) | 5018 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5019 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 5020 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); 5021 5022 if (has_constant_encode) { 5023 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5024 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5025 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 5026 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5027 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 5028 AMD_FMT_MOD_SET(DCC, 1) | 5029 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 5030 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5031 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 5032 5033 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5034 AMD_FMT_MOD_SET(RB, rb) | 5035 AMD_FMT_MOD_SET(PIPE, pipes)); 5036 } 5037 5038 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5039 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5040 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 5041 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5042 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 5043 AMD_FMT_MOD_SET(DCC, 1) | 5044 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 5045 
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5046 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 5047 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | 5048 AMD_FMT_MOD_SET(RB, rb) | 5049 AMD_FMT_MOD_SET(PIPE, pipes)); 5050 } 5051 5052 /* 5053 * Only supported for 64bpp on Raven, will be filtered on format in 5054 * dm_plane_format_mod_supported. 5055 */ 5056 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5057 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | 5058 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 5059 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5060 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 5061 5062 if (adev->family == AMDGPU_FAMILY_RV) { 5063 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5064 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5065 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 5066 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5067 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 5068 } 5069 5070 /* 5071 * Only supported for 64bpp on Raven, will be filtered on format in 5072 * dm_plane_format_mod_supported. 5073 */ 5074 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5075 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 5076 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5077 5078 if (adev->family == AMDGPU_FAMILY_RV) { 5079 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5080 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 5081 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5082 } 5083 } 5084 5085 static void 5086 add_gfx10_1_modifiers(const struct amdgpu_device *adev, 5087 uint64_t **mods, uint64_t *size, uint64_t *capacity) 5088 { 5089 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 5090 5091 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5092 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5093 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 5094 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5095 AMD_FMT_MOD_SET(DCC, 1) | 5096 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5097 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5098 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 5099 5100 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5101 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5102 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 5103 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5104 AMD_FMT_MOD_SET(DCC, 1) | 5105 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 5106 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5107 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5108 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 5109 5110 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5111 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5112 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 5113 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 5114 5115 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5116 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5117 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 5118 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 5119 5120 5121 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ 5122 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5123 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 5124 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5125 5126 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5127 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 5128 
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5129 } 5130 5131 static void 5132 add_gfx10_3_modifiers(const struct amdgpu_device *adev, 5133 uint64_t **mods, uint64_t *size, uint64_t *capacity) 5134 { 5135 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 5136 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); 5137 5138 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5139 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5140 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 5141 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5142 AMD_FMT_MOD_SET(PACKERS, pkrs) | 5143 AMD_FMT_MOD_SET(DCC, 1) | 5144 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5145 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5146 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 5147 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 5148 5149 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5150 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5151 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 5152 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5153 AMD_FMT_MOD_SET(PACKERS, pkrs) | 5154 AMD_FMT_MOD_SET(DCC, 1) | 5155 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5156 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 5157 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 5158 5159 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5160 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5161 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 5162 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5163 AMD_FMT_MOD_SET(PACKERS, pkrs) | 5164 AMD_FMT_MOD_SET(DCC, 1) | 5165 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 5166 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5167 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5168 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 5169 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 5170 5171 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5172 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5173 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 5174 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5175 AMD_FMT_MOD_SET(PACKERS, pkrs) | 5176 AMD_FMT_MOD_SET(DCC, 1) | 5177 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 5178 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5179 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 5180 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 5181 5182 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5183 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5184 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 5185 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5186 AMD_FMT_MOD_SET(PACKERS, pkrs)); 5187 5188 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5189 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5190 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 5191 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5192 AMD_FMT_MOD_SET(PACKERS, pkrs)); 5193 5194 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ 5195 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5196 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 5197 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5198 5199 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5200 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 5201 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5202 } 5203 5204 static int 5205 get_plane_modifiers(const struct 
amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) 5206 { 5207 uint64_t size = 0, capacity = 128; 5208 *mods = NULL; 5209 5210 /* We have not hooked up any pre-GFX9 modifiers. */ 5211 if (adev->family < AMDGPU_FAMILY_AI) 5212 return 0; 5213 5214 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); 5215 5216 if (plane_type == DRM_PLANE_TYPE_CURSOR) { 5217 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 5218 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 5219 return *mods ? 0 : -ENOMEM; 5220 } 5221 5222 switch (adev->family) { 5223 case AMDGPU_FAMILY_AI: 5224 case AMDGPU_FAMILY_RV: 5225 add_gfx9_modifiers(adev, mods, &size, &capacity); 5226 break; 5227 case AMDGPU_FAMILY_NV: 5228 case AMDGPU_FAMILY_VGH: 5229 case AMDGPU_FAMILY_YC: 5230 case AMDGPU_FAMILY_GC_10_3_6: 5231 case AMDGPU_FAMILY_GC_10_3_7: 5232 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) 5233 add_gfx10_3_modifiers(adev, mods, &size, &capacity); 5234 else 5235 add_gfx10_1_modifiers(adev, mods, &size, &capacity); 5236 break; 5237 } 5238 5239 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 5240 5241 /* INVALID marks the end of the list. */ 5242 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 5243 5244 if (!*mods) 5245 return -ENOMEM; 5246 5247 return 0; 5248 } 5249 5250 static int 5251 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, 5252 const struct amdgpu_framebuffer *afb, 5253 const enum surface_pixel_format format, 5254 const enum dc_rotation_angle rotation, 5255 const struct plane_size *plane_size, 5256 union dc_tiling_info *tiling_info, 5257 struct dc_plane_dcc_param *dcc, 5258 struct dc_plane_address *address, 5259 const bool force_disable_dcc) 5260 { 5261 const uint64_t modifier = afb->base.modifier; 5262 int ret = 0; 5263 5264 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); 5265 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier); 5266 5267 if (modifier_has_dcc(modifier) && !force_disable_dcc) { 5268 uint64_t dcc_address = afb->address + afb->base.offsets[1]; 5269 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); 5270 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); 5271 5272 dcc->enable = 1; 5273 dcc->meta_pitch = afb->base.pitches[1]; 5274 dcc->independent_64b_blks = independent_64b_blks; 5275 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { 5276 if (independent_64b_blks && independent_128b_blks) 5277 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl; 5278 else if (independent_128b_blks) 5279 dcc->dcc_ind_blk = hubp_ind_block_128b; 5280 else if (independent_64b_blks && !independent_128b_blks) 5281 dcc->dcc_ind_blk = hubp_ind_block_64b; 5282 else 5283 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 5284 } else { 5285 if (independent_64b_blks) 5286 dcc->dcc_ind_blk = hubp_ind_block_64b; 5287 else 5288 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 5289 } 5290 5291 address->grph.meta_addr.low_part = lower_32_bits(dcc_address); 5292 address->grph.meta_addr.high_part = upper_32_bits(dcc_address); 5293 } 5294 5295 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); 5296 if (ret) 5297 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret); 5298 5299 return ret; 5300 } 5301 5302 static int 5303 fill_plane_buffer_attributes(struct amdgpu_device *adev, 5304 const struct amdgpu_framebuffer *afb, 5305 const enum surface_pixel_format format, 5306 const enum 
dc_rotation_angle rotation, 5307 const uint64_t tiling_flags, 5308 union dc_tiling_info *tiling_info, 5309 struct plane_size *plane_size, 5310 struct dc_plane_dcc_param *dcc, 5311 struct dc_plane_address *address, 5312 bool tmz_surface, 5313 bool force_disable_dcc) 5314 { 5315 const struct drm_framebuffer *fb = &afb->base; 5316 int ret; 5317 5318 memset(tiling_info, 0, sizeof(*tiling_info)); 5319 memset(plane_size, 0, sizeof(*plane_size)); 5320 memset(dcc, 0, sizeof(*dcc)); 5321 memset(address, 0, sizeof(*address)); 5322 5323 address->tmz_surface = tmz_surface; 5324 5325 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { 5326 uint64_t addr = afb->address + fb->offsets[0]; 5327 5328 plane_size->surface_size.x = 0; 5329 plane_size->surface_size.y = 0; 5330 plane_size->surface_size.width = fb->width; 5331 plane_size->surface_size.height = fb->height; 5332 plane_size->surface_pitch = 5333 fb->pitches[0] / fb->format->cpp[0]; 5334 5335 address->type = PLN_ADDR_TYPE_GRAPHICS; 5336 address->grph.addr.low_part = lower_32_bits(addr); 5337 address->grph.addr.high_part = upper_32_bits(addr); 5338 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { 5339 uint64_t luma_addr = afb->address + fb->offsets[0]; 5340 uint64_t chroma_addr = afb->address + fb->offsets[1]; 5341 5342 plane_size->surface_size.x = 0; 5343 plane_size->surface_size.y = 0; 5344 plane_size->surface_size.width = fb->width; 5345 plane_size->surface_size.height = fb->height; 5346 plane_size->surface_pitch = 5347 fb->pitches[0] / fb->format->cpp[0]; 5348 5349 plane_size->chroma_size.x = 0; 5350 plane_size->chroma_size.y = 0; 5351 /* TODO: set these based on surface format */ 5352 plane_size->chroma_size.width = fb->width / 2; 5353 plane_size->chroma_size.height = fb->height / 2; 5354 5355 plane_size->chroma_pitch = 5356 fb->pitches[1] / fb->format->cpp[1]; 5357 5358 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; 5359 address->video_progressive.luma_addr.low_part = 5360 lower_32_bits(luma_addr); 5361 address->video_progressive.luma_addr.high_part = 5362 upper_32_bits(luma_addr); 5363 address->video_progressive.chroma_addr.low_part = 5364 lower_32_bits(chroma_addr); 5365 address->video_progressive.chroma_addr.high_part = 5366 upper_32_bits(chroma_addr); 5367 } 5368 5369 if (adev->family >= AMDGPU_FAMILY_AI) { 5370 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, 5371 rotation, plane_size, 5372 tiling_info, dcc, 5373 address, 5374 force_disable_dcc); 5375 if (ret) 5376 return ret; 5377 } else { 5378 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); 5379 } 5380 5381 return 0; 5382 } 5383 5384 static void 5385 fill_blending_from_plane_state(const struct drm_plane_state *plane_state, 5386 bool *per_pixel_alpha, bool *pre_multiplied_alpha, 5387 bool *global_alpha, int *global_alpha_value) 5388 { 5389 *per_pixel_alpha = false; 5390 *pre_multiplied_alpha = true; 5391 *global_alpha = false; 5392 *global_alpha_value = 0xff; 5393 5394 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY) 5395 return; 5396 5397 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI || 5398 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) { 5399 static const uint32_t alpha_formats[] = { 5400 DRM_FORMAT_ARGB8888, 5401 DRM_FORMAT_RGBA8888, 5402 DRM_FORMAT_ABGR8888, 5403 }; 5404 uint32_t format = plane_state->fb->format->format; 5405 unsigned int i; 5406 5407 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { 5408 if (format == alpha_formats[i]) { 5409 *per_pixel_alpha = true; 5410 break; 5411 } 5412 } 5413 5414 if 
(*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) 5415 *pre_multiplied_alpha = false; 5416 } 5417 5418 if (plane_state->alpha < 0xffff) { 5419 *global_alpha = true; 5420 *global_alpha_value = plane_state->alpha >> 8; 5421 } 5422 } 5423 5424 static int 5425 fill_plane_color_attributes(const struct drm_plane_state *plane_state, 5426 const enum surface_pixel_format format, 5427 enum dc_color_space *color_space) 5428 { 5429 bool full_range; 5430 5431 *color_space = COLOR_SPACE_SRGB; 5432 5433 /* DRM color properties only affect non-RGB formats. */ 5434 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 5435 return 0; 5436 5437 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); 5438 5439 switch (plane_state->color_encoding) { 5440 case DRM_COLOR_YCBCR_BT601: 5441 if (full_range) 5442 *color_space = COLOR_SPACE_YCBCR601; 5443 else 5444 *color_space = COLOR_SPACE_YCBCR601_LIMITED; 5445 break; 5446 5447 case DRM_COLOR_YCBCR_BT709: 5448 if (full_range) 5449 *color_space = COLOR_SPACE_YCBCR709; 5450 else 5451 *color_space = COLOR_SPACE_YCBCR709_LIMITED; 5452 break; 5453 5454 case DRM_COLOR_YCBCR_BT2020: 5455 if (full_range) 5456 *color_space = COLOR_SPACE_2020_YCBCR; 5457 else 5458 return -EINVAL; 5459 break; 5460 5461 default: 5462 return -EINVAL; 5463 } 5464 5465 return 0; 5466 } 5467 5468 static int 5469 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 5470 const struct drm_plane_state *plane_state, 5471 const uint64_t tiling_flags, 5472 struct dc_plane_info *plane_info, 5473 struct dc_plane_address *address, 5474 bool tmz_surface, 5475 bool force_disable_dcc) 5476 { 5477 const struct drm_framebuffer *fb = plane_state->fb; 5478 const struct amdgpu_framebuffer *afb = 5479 to_amdgpu_framebuffer(plane_state->fb); 5480 int ret; 5481 5482 memset(plane_info, 0, sizeof(*plane_info)); 5483 5484 switch (fb->format->format) { 5485 case DRM_FORMAT_C8: 5486 plane_info->format = 5487 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; 5488 break; 5489 case DRM_FORMAT_RGB565: 5490 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; 5491 break; 5492 case DRM_FORMAT_XRGB8888: 5493 case DRM_FORMAT_ARGB8888: 5494 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 5495 break; 5496 case DRM_FORMAT_XRGB2101010: 5497 case DRM_FORMAT_ARGB2101010: 5498 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; 5499 break; 5500 case DRM_FORMAT_XBGR2101010: 5501 case DRM_FORMAT_ABGR2101010: 5502 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; 5503 break; 5504 case DRM_FORMAT_XBGR8888: 5505 case DRM_FORMAT_ABGR8888: 5506 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; 5507 break; 5508 case DRM_FORMAT_NV21: 5509 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; 5510 break; 5511 case DRM_FORMAT_NV12: 5512 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; 5513 break; 5514 case DRM_FORMAT_P010: 5515 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; 5516 break; 5517 case DRM_FORMAT_XRGB16161616F: 5518 case DRM_FORMAT_ARGB16161616F: 5519 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; 5520 break; 5521 case DRM_FORMAT_XBGR16161616F: 5522 case DRM_FORMAT_ABGR16161616F: 5523 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; 5524 break; 5525 case DRM_FORMAT_XRGB16161616: 5526 case DRM_FORMAT_ARGB16161616: 5527 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; 5528 break; 5529 case DRM_FORMAT_XBGR16161616: 5530 case DRM_FORMAT_ABGR16161616: 5531 plane_info->format =
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; 5532 break; 5533 default: 5534 DRM_ERROR( 5535 "Unsupported screen format %p4cc\n", 5536 &fb->format->format); 5537 return -EINVAL; 5538 } 5539 5540 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 5541 case DRM_MODE_ROTATE_0: 5542 plane_info->rotation = ROTATION_ANGLE_0; 5543 break; 5544 case DRM_MODE_ROTATE_90: 5545 plane_info->rotation = ROTATION_ANGLE_90; 5546 break; 5547 case DRM_MODE_ROTATE_180: 5548 plane_info->rotation = ROTATION_ANGLE_180; 5549 break; 5550 case DRM_MODE_ROTATE_270: 5551 plane_info->rotation = ROTATION_ANGLE_270; 5552 break; 5553 default: 5554 plane_info->rotation = ROTATION_ANGLE_0; 5555 break; 5556 } 5557 5558 plane_info->visible = true; 5559 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; 5560 5561 plane_info->layer_index = 0; 5562 5563 ret = fill_plane_color_attributes(plane_state, plane_info->format, 5564 &plane_info->color_space); 5565 if (ret) 5566 return ret; 5567 5568 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format, 5569 plane_info->rotation, tiling_flags, 5570 &plane_info->tiling_info, 5571 &plane_info->plane_size, 5572 &plane_info->dcc, address, tmz_surface, 5573 force_disable_dcc); 5574 if (ret) 5575 return ret; 5576 5577 fill_blending_from_plane_state( 5578 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha, 5579 &plane_info->global_alpha, &plane_info->global_alpha_value); 5580 5581 return 0; 5582 } 5583 5584 static int fill_dc_plane_attributes(struct amdgpu_device *adev, 5585 struct dc_plane_state *dc_plane_state, 5586 struct drm_plane_state *plane_state, 5587 struct drm_crtc_state *crtc_state) 5588 { 5589 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 5590 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb; 5591 struct dc_scaling_info scaling_info; 5592 struct dc_plane_info plane_info; 5593 int ret; 5594 bool force_disable_dcc = false; 5595 5596 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info); 5597 if (ret) 5598 return ret; 5599 5600 dc_plane_state->src_rect = scaling_info.src_rect; 5601 dc_plane_state->dst_rect = scaling_info.dst_rect; 5602 dc_plane_state->clip_rect = scaling_info.clip_rect; 5603 dc_plane_state->scaling_quality = scaling_info.scaling_quality; 5604 5605 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; 5606 ret = fill_dc_plane_info_and_addr(adev, plane_state, 5607 afb->tiling_flags, 5608 &plane_info, 5609 &dc_plane_state->address, 5610 afb->tmz_surface, 5611 force_disable_dcc); 5612 if (ret) 5613 return ret; 5614 5615 dc_plane_state->format = plane_info.format; 5616 dc_plane_state->color_space = plane_info.color_space; 5617 dc_plane_state->format = plane_info.format; 5618 dc_plane_state->plane_size = plane_info.plane_size; 5619 dc_plane_state->rotation = plane_info.rotation; 5620 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror; 5621 dc_plane_state->stereo_format = plane_info.stereo_format; 5622 dc_plane_state->tiling_info = plane_info.tiling_info; 5623 dc_plane_state->visible = plane_info.visible; 5624 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha; 5625 dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha; 5626 dc_plane_state->global_alpha = plane_info.global_alpha; 5627 dc_plane_state->global_alpha_value = plane_info.global_alpha_value; 5628 dc_plane_state->dcc = plane_info.dcc; 5629 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0 5630 dc_plane_state->flip_int_enabled = true; 5631 
5632 /* 5633 * Always set input transfer function, since plane state is refreshed 5634 * every time. 5635 */ 5636 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state); 5637 if (ret) 5638 return ret; 5639 5640 return 0; 5641 } 5642 5643 static void update_stream_scaling_settings(const struct drm_display_mode *mode, 5644 const struct dm_connector_state *dm_state, 5645 struct dc_stream_state *stream) 5646 { 5647 enum amdgpu_rmx_type rmx_type; 5648 5649 struct rect src = { 0 }; /* viewport in composition space*/ 5650 struct rect dst = { 0 }; /* stream addressable area */ 5651 5652 /* no mode. nothing to be done */ 5653 if (!mode) 5654 return; 5655 5656 /* Full screen scaling by default */ 5657 src.width = mode->hdisplay; 5658 src.height = mode->vdisplay; 5659 dst.width = stream->timing.h_addressable; 5660 dst.height = stream->timing.v_addressable; 5661 5662 if (dm_state) { 5663 rmx_type = dm_state->scaling; 5664 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { 5665 if (src.width * dst.height < 5666 src.height * dst.width) { 5667 /* height needs less upscaling/more downscaling */ 5668 dst.width = src.width * 5669 dst.height / src.height; 5670 } else { 5671 /* width needs less upscaling/more downscaling */ 5672 dst.height = src.height * 5673 dst.width / src.width; 5674 } 5675 } else if (rmx_type == RMX_CENTER) { 5676 dst = src; 5677 } 5678 5679 dst.x = (stream->timing.h_addressable - dst.width) / 2; 5680 dst.y = (stream->timing.v_addressable - dst.height) / 2; 5681 5682 if (dm_state->underscan_enable) { 5683 dst.x += dm_state->underscan_hborder / 2; 5684 dst.y += dm_state->underscan_vborder / 2; 5685 dst.width -= dm_state->underscan_hborder; 5686 dst.height -= dm_state->underscan_vborder; 5687 } 5688 } 5689 5690 stream->src = src; 5691 stream->dst = dst; 5692 5693 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n", 5694 dst.x, dst.y, dst.width, dst.height); 5695 5696 } 5697 5698 static enum dc_color_depth 5699 convert_color_depth_from_display_info(const struct drm_connector *connector, 5700 bool is_y420, int requested_bpc) 5701 { 5702 uint8_t bpc; 5703 5704 if (is_y420) { 5705 bpc = 8; 5706 5707 /* Cap display bpc based on HDMI 2.0 HF-VSDB */ 5708 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) 5709 bpc = 16; 5710 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) 5711 bpc = 12; 5712 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) 5713 bpc = 10; 5714 } else { 5715 bpc = (uint8_t)connector->display_info.bpc; 5716 /* Assume 8 bpc by default if no bpc is specified. */ 5717 bpc = bpc ? bpc : 8; 5718 } 5719 5720 if (requested_bpc > 0) { 5721 /* 5722 * Cap display bpc based on the user requested value. 5723 * 5724 * The value for state->max_bpc may not be correctly updated 5725 * depending on when the connector gets added to the state 5726 * or if this was called outside of atomic check, so it 5727 * can't be used directly. 5728 */ 5729 bpc = min_t(u8, bpc, requested_bpc); 5730 5731 /* Round down to the nearest even number.
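For example, a capped value of 11 bpc is rounded down to 10 bpc here, since the switch below only maps even component depths and would otherwise return COLOR_DEPTH_UNDEFINED.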
*/ 5732 bpc = bpc - (bpc & 1); 5733 } 5734 5735 switch (bpc) { 5736 case 0: 5737 /* 5738 * Temporary Work around, DRM doesn't parse color depth for 5739 * EDID revision before 1.4 5740 * TODO: Fix edid parsing 5741 */ 5742 return COLOR_DEPTH_888; 5743 case 6: 5744 return COLOR_DEPTH_666; 5745 case 8: 5746 return COLOR_DEPTH_888; 5747 case 10: 5748 return COLOR_DEPTH_101010; 5749 case 12: 5750 return COLOR_DEPTH_121212; 5751 case 14: 5752 return COLOR_DEPTH_141414; 5753 case 16: 5754 return COLOR_DEPTH_161616; 5755 default: 5756 return COLOR_DEPTH_UNDEFINED; 5757 } 5758 } 5759 5760 static enum dc_aspect_ratio 5761 get_aspect_ratio(const struct drm_display_mode *mode_in) 5762 { 5763 /* 1-1 mapping, since both enums follow the HDMI spec. */ 5764 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio; 5765 } 5766 5767 static enum dc_color_space 5768 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) 5769 { 5770 enum dc_color_space color_space = COLOR_SPACE_SRGB; 5771 5772 switch (dc_crtc_timing->pixel_encoding) { 5773 case PIXEL_ENCODING_YCBCR422: 5774 case PIXEL_ENCODING_YCBCR444: 5775 case PIXEL_ENCODING_YCBCR420: 5776 { 5777 /* 5778 * 27030khz is the separation point between HDTV and SDTV 5779 * according to HDMI spec, we use YCbCr709 and YCbCr601 5780 * respectively 5781 */ 5782 if (dc_crtc_timing->pix_clk_100hz > 270300) { 5783 if (dc_crtc_timing->flags.Y_ONLY) 5784 color_space = 5785 COLOR_SPACE_YCBCR709_LIMITED; 5786 else 5787 color_space = COLOR_SPACE_YCBCR709; 5788 } else { 5789 if (dc_crtc_timing->flags.Y_ONLY) 5790 color_space = 5791 COLOR_SPACE_YCBCR601_LIMITED; 5792 else 5793 color_space = COLOR_SPACE_YCBCR601; 5794 } 5795 5796 } 5797 break; 5798 case PIXEL_ENCODING_RGB: 5799 color_space = COLOR_SPACE_SRGB; 5800 break; 5801 5802 default: 5803 WARN_ON(1); 5804 break; 5805 } 5806 5807 return color_space; 5808 } 5809 5810 static bool adjust_colour_depth_from_display_info( 5811 struct dc_crtc_timing *timing_out, 5812 const struct drm_display_info *info) 5813 { 5814 enum dc_color_depth depth = timing_out->display_color_depth; 5815 int normalized_clk; 5816 do { 5817 normalized_clk = timing_out->pix_clk_100hz / 10; 5818 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ 5819 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) 5820 normalized_clk /= 2; 5821 /* Adjusting pix clock following on HDMI spec based on colour depth */ 5822 switch (depth) { 5823 case COLOR_DEPTH_888: 5824 break; 5825 case COLOR_DEPTH_101010: 5826 normalized_clk = (normalized_clk * 30) / 24; 5827 break; 5828 case COLOR_DEPTH_121212: 5829 normalized_clk = (normalized_clk * 36) / 24; 5830 break; 5831 case COLOR_DEPTH_161616: 5832 normalized_clk = (normalized_clk * 48) / 24; 5833 break; 5834 default: 5835 /* The above depths are the only ones valid for HDMI. 
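HDMI only defines 24, 30, 36 and 48 bpp pixel packings, so e.g. COLOR_DEPTH_141414 is rejected here. As a worked example of the scaling above: a 4k60 RGB stream at 594000 kHz needs 594000 * 30 / 24 = 742500 kHz at 10 bpc, which exceeds a typical 600000 kHz max TMDS clock, so the loop drops the depth back to 8 bpc.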
*/ 5836 return false; 5837 } 5838 if (normalized_clk <= info->max_tmds_clock) { 5839 timing_out->display_color_depth = depth; 5840 return true; 5841 } 5842 } while (--depth > COLOR_DEPTH_666); 5843 return false; 5844 } 5845 5846 static void fill_stream_properties_from_drm_display_mode( 5847 struct dc_stream_state *stream, 5848 const struct drm_display_mode *mode_in, 5849 const struct drm_connector *connector, 5850 const struct drm_connector_state *connector_state, 5851 const struct dc_stream_state *old_stream, 5852 int requested_bpc) 5853 { 5854 struct dc_crtc_timing *timing_out = &stream->timing; 5855 const struct drm_display_info *info = &connector->display_info; 5856 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 5857 struct hdmi_vendor_infoframe hv_frame; 5858 struct hdmi_avi_infoframe avi_frame; 5859 5860 memset(&hv_frame, 0, sizeof(hv_frame)); 5861 memset(&avi_frame, 0, sizeof(avi_frame)); 5862 5863 timing_out->h_border_left = 0; 5864 timing_out->h_border_right = 0; 5865 timing_out->v_border_top = 0; 5866 timing_out->v_border_bottom = 0; 5867 /* TODO: un-hardcode */ 5868 if (drm_mode_is_420_only(info, mode_in) 5869 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 5870 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 5871 else if (drm_mode_is_420_also(info, mode_in) 5872 && aconnector->force_yuv420_output) 5873 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 5874 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) 5875 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 5876 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; 5877 else 5878 timing_out->pixel_encoding = PIXEL_ENCODING_RGB; 5879 5880 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; 5881 timing_out->display_color_depth = convert_color_depth_from_display_info( 5882 connector, 5883 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420), 5884 requested_bpc); 5885 timing_out->scan_type = SCANNING_TYPE_NODATA; 5886 timing_out->hdmi_vic = 0; 5887 5888 if(old_stream) { 5889 timing_out->vic = old_stream->timing.vic; 5890 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; 5891 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; 5892 } else { 5893 timing_out->vic = drm_match_cea_mode(mode_in); 5894 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) 5895 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; 5896 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) 5897 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; 5898 } 5899 5900 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 5901 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); 5902 timing_out->vic = avi_frame.video_code; 5903 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); 5904 timing_out->hdmi_vic = hv_frame.vic; 5905 } 5906 5907 if (is_freesync_video_mode(mode_in, aconnector)) { 5908 timing_out->h_addressable = mode_in->hdisplay; 5909 timing_out->h_total = mode_in->htotal; 5910 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; 5911 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; 5912 timing_out->v_total = mode_in->vtotal; 5913 timing_out->v_addressable = mode_in->vdisplay; 5914 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; 5915 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; 5916 timing_out->pix_clk_100hz = mode_in->clock * 10; 5917 } else { 5918 
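/* Not a FreeSync video mode: use the adjusted crtc_* timings, which drm_mode_set_crtcinfo() fills in for the hardware. */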
timing_out->h_addressable = mode_in->crtc_hdisplay; 5919 timing_out->h_total = mode_in->crtc_htotal; 5920 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; 5921 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; 5922 timing_out->v_total = mode_in->crtc_vtotal; 5923 timing_out->v_addressable = mode_in->crtc_vdisplay; 5924 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 5925 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; 5926 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; 5927 } 5928 5929 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 5930 5931 stream->output_color_space = get_output_color_space(timing_out); 5932 5933 stream->out_transfer_func->type = TF_TYPE_PREDEFINED; 5934 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; 5935 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 5936 if (!adjust_colour_depth_from_display_info(timing_out, info) && 5937 drm_mode_is_420_also(info, mode_in) && 5938 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { 5939 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 5940 adjust_colour_depth_from_display_info(timing_out, info); 5941 } 5942 } 5943 } 5944 5945 static void fill_audio_info(struct audio_info *audio_info, 5946 const struct drm_connector *drm_connector, 5947 const struct dc_sink *dc_sink) 5948 { 5949 int i = 0; 5950 int cea_revision = 0; 5951 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; 5952 5953 audio_info->manufacture_id = edid_caps->manufacturer_id; 5954 audio_info->product_id = edid_caps->product_id; 5955 5956 cea_revision = drm_connector->display_info.cea_rev; 5957 5958 strscpy(audio_info->display_name, 5959 edid_caps->display_name, 5960 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 5961 5962 if (cea_revision >= 3) { 5963 audio_info->mode_count = edid_caps->audio_mode_count; 5964 5965 for (i = 0; i < audio_info->mode_count; ++i) { 5966 audio_info->modes[i].format_code = 5967 (enum audio_format_code) 5968 (edid_caps->audio_modes[i].format_code); 5969 audio_info->modes[i].channel_count = 5970 edid_caps->audio_modes[i].channel_count; 5971 audio_info->modes[i].sample_rates.all = 5972 edid_caps->audio_modes[i].sample_rate; 5973 audio_info->modes[i].sample_size = 5974 edid_caps->audio_modes[i].sample_size; 5975 } 5976 } 5977 5978 audio_info->flags.all = edid_caps->speaker_flags; 5979 5980 /* TODO: We only check for the progressive mode, check for interlace mode too */ 5981 if (drm_connector->latency_present[0]) { 5982 audio_info->video_latency = drm_connector->video_latency[0]; 5983 audio_info->audio_latency = drm_connector->audio_latency[0]; 5984 } 5985 5986 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ 5987 5988 } 5989 5990 static void 5991 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, 5992 struct drm_display_mode *dst_mode) 5993 { 5994 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; 5995 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; 5996 dst_mode->crtc_clock = src_mode->crtc_clock; 5997 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; 5998 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; 5999 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; 6000 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; 6001 dst_mode->crtc_htotal = src_mode->crtc_htotal; 6002 dst_mode->crtc_hskew = src_mode->crtc_hskew; 6003 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; 6004 dst_mode->crtc_vblank_end = 
src_mode->crtc_vblank_end; 6005 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; 6006 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; 6007 dst_mode->crtc_vtotal = src_mode->crtc_vtotal; 6008 } 6009 6010 static void 6011 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, 6012 const struct drm_display_mode *native_mode, 6013 bool scale_enabled) 6014 { 6015 if (scale_enabled) { 6016 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 6017 } else if (native_mode->clock == drm_mode->clock && 6018 native_mode->htotal == drm_mode->htotal && 6019 native_mode->vtotal == drm_mode->vtotal) { 6020 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 6021 } else { 6022 /* no scaling nor amdgpu inserted, no need to patch */ 6023 } 6024 } 6025 6026 static struct dc_sink * 6027 create_fake_sink(struct amdgpu_dm_connector *aconnector) 6028 { 6029 struct dc_sink_init_data sink_init_data = { 0 }; 6030 struct dc_sink *sink = NULL; 6031 sink_init_data.link = aconnector->dc_link; 6032 sink_init_data.sink_signal = aconnector->dc_link->connector_signal; 6033 6034 sink = dc_sink_create(&sink_init_data); 6035 if (!sink) { 6036 DRM_ERROR("Failed to create sink!\n"); 6037 return NULL; 6038 } 6039 sink->sink_signal = SIGNAL_TYPE_VIRTUAL; 6040 6041 return sink; 6042 } 6043 6044 static void set_multisync_trigger_params( 6045 struct dc_stream_state *stream) 6046 { 6047 struct dc_stream_state *master = NULL; 6048 6049 if (stream->triggered_crtc_reset.enabled) { 6050 master = stream->triggered_crtc_reset.event_source; 6051 stream->triggered_crtc_reset.event = 6052 master->timing.flags.VSYNC_POSITIVE_POLARITY ? 6053 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING; 6054 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL; 6055 } 6056 } 6057 6058 static void set_master_stream(struct dc_stream_state *stream_set[], 6059 int stream_count) 6060 { 6061 int j, highest_rfr = 0, master_stream = 0; 6062 6063 for (j = 0; j < stream_count; j++) { 6064 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) { 6065 int refresh_rate = 0; 6066 6067 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/ 6068 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total); 6069 if (refresh_rate > highest_rfr) { 6070 highest_rfr = refresh_rate; 6071 master_stream = j; 6072 } 6073 } 6074 } 6075 for (j = 0; j < stream_count; j++) { 6076 if (stream_set[j]) 6077 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream]; 6078 } 6079 } 6080 6081 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context) 6082 { 6083 int i = 0; 6084 struct dc_stream_state *stream; 6085 6086 if (context->stream_count < 2) 6087 return; 6088 for (i = 0; i < context->stream_count ; i++) { 6089 if (!context->streams[i]) 6090 continue; 6091 /* 6092 * TODO: add a function to read AMD VSDB bits and set 6093 * crtc_sync_master.multi_sync_enabled flag 6094 * For now it's set to false 6095 */ 6096 } 6097 6098 set_master_stream(context->streams, context->stream_count); 6099 6100 for (i = 0; i < context->stream_count ; i++) { 6101 stream = context->streams[i]; 6102 6103 if (!stream) 6104 continue; 6105 6106 set_multisync_trigger_params(stream); 6107 } 6108 } 6109 6110 #if defined(CONFIG_DRM_AMD_DC_DCN) 6111 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, 6112 struct dc_sink *sink, struct dc_stream_state *stream, 6113 struct dsc_dec_dpcd_caps *dsc_caps) 6114 { 6115 stream->timing.flags.DSC = 0; 6116 dsc_caps->is_dsc_supported = false; 6117 6118 if 
(aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || 6119 sink->sink_signal == SIGNAL_TYPE_EDP)) { 6120 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || 6121 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) 6122 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, 6123 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 6124 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, 6125 dsc_caps); 6126 } 6127 } 6128 6129 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, 6130 struct dc_sink *sink, struct dc_stream_state *stream, 6131 struct dsc_dec_dpcd_caps *dsc_caps, 6132 uint32_t max_dsc_target_bpp_limit_override) 6133 { 6134 const struct dc_link_settings *verified_link_cap = NULL; 6135 uint32_t link_bw_in_kbps; 6136 uint32_t edp_min_bpp_x16, edp_max_bpp_x16; 6137 struct dc *dc = sink->ctx->dc; 6138 struct dc_dsc_bw_range bw_range = {0}; 6139 struct dc_dsc_config dsc_cfg = {0}; 6140 6141 verified_link_cap = dc_link_get_link_cap(stream->link); 6142 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); 6143 edp_min_bpp_x16 = 8 * 16; 6144 edp_max_bpp_x16 = 8 * 16; 6145 6146 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) 6147 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; 6148 6149 if (edp_max_bpp_x16 < edp_min_bpp_x16) 6150 edp_min_bpp_x16 = edp_max_bpp_x16; 6151 6152 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], 6153 dc->debug.dsc_min_slice_height_override, 6154 edp_min_bpp_x16, edp_max_bpp_x16, 6155 dsc_caps, 6156 &stream->timing, 6157 &bw_range)) { 6158 6159 if (bw_range.max_kbps < link_bw_in_kbps) { 6160 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 6161 dsc_caps, 6162 dc->debug.dsc_min_slice_height_override, 6163 max_dsc_target_bpp_limit_override, 6164 0, 6165 &stream->timing, 6166 &dsc_cfg)) { 6167 stream->timing.dsc_cfg = dsc_cfg; 6168 stream->timing.flags.DSC = 1; 6169 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; 6170 } 6171 return; 6172 } 6173 } 6174 6175 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 6176 dsc_caps, 6177 dc->debug.dsc_min_slice_height_override, 6178 max_dsc_target_bpp_limit_override, 6179 link_bw_in_kbps, 6180 &stream->timing, 6181 &dsc_cfg)) { 6182 stream->timing.dsc_cfg = dsc_cfg; 6183 stream->timing.flags.DSC = 1; 6184 } 6185 } 6186 6187 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, 6188 struct dc_sink *sink, struct dc_stream_state *stream, 6189 struct dsc_dec_dpcd_caps *dsc_caps) 6190 { 6191 struct drm_connector *drm_connector = &aconnector->base; 6192 uint32_t link_bandwidth_kbps; 6193 uint32_t max_dsc_target_bpp_limit_override = 0; 6194 struct dc *dc = sink->ctx->dc; 6195 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps; 6196 uint32_t dsc_max_supported_bw_in_kbps; 6197 6198 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, 6199 dc_link_get_link_cap(aconnector->dc_link)); 6200 6201 if (stream->link && stream->link->local_sink) 6202 max_dsc_target_bpp_limit_override = 6203 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit; 6204 6205 /* Set DSC policy according to dsc_clock_en */ 6206 dc_dsc_policy_set_enable_dsc_when_not_needed( 6207 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); 6208 6209 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp && 6210 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { 6211 6212 
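/* eDP with a DSC-capable panel: roughly, apply_dsc_policy_for_edp() above keeps a fixed target bpp when the panel's max bpp already fits the verified link bandwidth, and otherwise lets DC size the DSC config to that bandwidth. */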
apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); 6213 6214 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { 6215 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { 6216 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 6217 dsc_caps, 6218 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, 6219 max_dsc_target_bpp_limit_override, 6220 link_bandwidth_kbps, 6221 &stream->timing, 6222 &stream->timing.dsc_cfg)) { 6223 stream->timing.flags.DSC = 1; 6224 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", 6225 __func__, drm_connector->name); 6226 } 6227 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { 6228 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); 6229 max_supported_bw_in_kbps = link_bandwidth_kbps; 6230 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; 6231 6232 if (timing_bw_in_kbps > max_supported_bw_in_kbps && 6233 max_supported_bw_in_kbps > 0 && 6234 dsc_max_supported_bw_in_kbps > 0) 6235 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 6236 dsc_caps, 6237 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, 6238 max_dsc_target_bpp_limit_override, 6239 dsc_max_supported_bw_in_kbps, 6240 &stream->timing, 6241 &stream->timing.dsc_cfg)) { 6242 stream->timing.flags.DSC = 1; 6243 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", 6244 __func__, drm_connector->name); 6245 } 6246 } 6247 } 6248 6249 /* Overwrite the stream flag if DSC is enabled through debugfs */ 6250 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) 6251 stream->timing.flags.DSC = 1; 6252 6253 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h) 6254 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; 6255 6256 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) 6257 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; 6258 6259 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) 6260 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; 6261 } 6262 #endif /* CONFIG_DRM_AMD_DC_DCN */ 6263 6264 /** 6265 * DOC: FreeSync Video 6266 * 6267 * When a userspace application wants to play a video, the content follows a 6268 * standard format definition that usually specifies the FPS for that format. 6269 * The list below illustrates some video formats and the expected FPS, 6270 * respectively: 6271 * 6272 * - TV/NTSC (23.976 FPS) 6273 * - Cinema (24 FPS) 6274 * - TV/PAL (25 FPS) 6275 * - TV/NTSC (29.97 FPS) 6276 * - TV/NTSC (30 FPS) 6277 * - Cinema HFR (48 FPS) 6278 * - TV/PAL (50 FPS) 6279 * - Commonly used (60 FPS) 6280 * - Multiples of 24 (48,72,96,120 FPS) 6281 * 6282 * The list of standard video formats is not huge and can be added to the 6283 * connector modeset list beforehand. With that, userspace can leverage 6284 * FreeSync to extend the front porch in order to attain the target refresh 6285 * rate. Such a switch will happen seamlessly, without screen blanking or 6286 * reprogramming of the output in any other way. If userspace requests a 6287 * modesetting change compatible with FreeSync modes that only differ in the 6288 * refresh rate, DC will skip the full update and avoid blink during the 6289 * transition.
For example, the video player can change the modesetting from 6290 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without 6291 * causing any display blink. This same concept can be applied to a mode 6292 * setting change. 6293 */ 6294 static struct drm_display_mode * 6295 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, 6296 bool use_probed_modes) 6297 { 6298 struct drm_display_mode *m, *m_pref = NULL; 6299 u16 current_refresh, highest_refresh; 6300 struct list_head *list_head = use_probed_modes ? 6301 &aconnector->base.probed_modes : 6302 &aconnector->base.modes; 6303 6304 if (aconnector->freesync_vid_base.clock != 0) 6305 return &aconnector->freesync_vid_base; 6306 6307 /* Find the preferred mode */ 6308 list_for_each_entry (m, list_head, head) { 6309 if (m->type & DRM_MODE_TYPE_PREFERRED) { 6310 m_pref = m; 6311 break; 6312 } 6313 } 6314 6315 if (!m_pref) { 6316 /* Probably an EDID with no preferred mode. Fallback to first entry */ 6317 m_pref = list_first_entry_or_null( 6318 &aconnector->base.modes, struct drm_display_mode, head); 6319 if (!m_pref) { 6320 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); 6321 return NULL; 6322 } 6323 } 6324 6325 highest_refresh = drm_mode_vrefresh(m_pref); 6326 6327 /* 6328 * Find the mode with highest refresh rate with same resolution. 6329 * For some monitors, preferred mode is not the mode with highest 6330 * supported refresh rate. 6331 */ 6332 list_for_each_entry (m, list_head, head) { 6333 current_refresh = drm_mode_vrefresh(m); 6334 6335 if (m->hdisplay == m_pref->hdisplay && 6336 m->vdisplay == m_pref->vdisplay && 6337 highest_refresh < current_refresh) { 6338 highest_refresh = current_refresh; 6339 m_pref = m; 6340 } 6341 } 6342 6343 drm_mode_copy(&aconnector->freesync_vid_base, m_pref); 6344 return m_pref; 6345 } 6346 6347 static bool is_freesync_video_mode(const struct drm_display_mode *mode, 6348 struct amdgpu_dm_connector *aconnector) 6349 { 6350 struct drm_display_mode *high_mode; 6351 int timing_diff; 6352 6353 high_mode = get_highest_refresh_rate_mode(aconnector, false); 6354 if (!high_mode || !mode) 6355 return false; 6356 6357 timing_diff = high_mode->vtotal - mode->vtotal; 6358 6359 if (high_mode->clock == 0 || high_mode->clock != mode->clock || 6360 high_mode->hdisplay != mode->hdisplay || 6361 high_mode->vdisplay != mode->vdisplay || 6362 high_mode->hsync_start != mode->hsync_start || 6363 high_mode->hsync_end != mode->hsync_end || 6364 high_mode->htotal != mode->htotal || 6365 high_mode->hskew != mode->hskew || 6366 high_mode->vscan != mode->vscan || 6367 high_mode->vsync_start - mode->vsync_start != timing_diff || 6368 high_mode->vsync_end - mode->vsync_end != timing_diff) 6369 return false; 6370 else 6371 return true; 6372 } 6373 6374 static struct dc_stream_state * 6375 create_stream_for_sink(struct amdgpu_dm_connector *aconnector, 6376 const struct drm_display_mode *drm_mode, 6377 const struct dm_connector_state *dm_state, 6378 const struct dc_stream_state *old_stream, 6379 int requested_bpc) 6380 { 6381 struct drm_display_mode *preferred_mode = NULL; 6382 struct drm_connector *drm_connector; 6383 const struct drm_connector_state *con_state = 6384 dm_state ? &dm_state->base : NULL; 6385 struct dc_stream_state *stream = NULL; 6386 struct drm_display_mode mode = *drm_mode; 6387 struct drm_display_mode saved_mode; 6388 struct drm_display_mode *freesync_mode = NULL; 6389 bool native_mode_found = false; 6390 bool recalculate_timing = false; 6391 bool scale = dm_state ? 
(dm_state->scaling != RMX_OFF) : false; 6392 int mode_refresh; 6393 int preferred_refresh = 0; 6394 #if defined(CONFIG_DRM_AMD_DC_DCN) 6395 struct dsc_dec_dpcd_caps dsc_caps; 6396 #endif 6397 struct dc_sink *sink = NULL; 6398 6399 memset(&saved_mode, 0, sizeof(saved_mode)); 6400 6401 if (aconnector == NULL) { 6402 DRM_ERROR("aconnector is NULL!\n"); 6403 return stream; 6404 } 6405 6406 drm_connector = &aconnector->base; 6407 6408 if (!aconnector->dc_sink) { 6409 sink = create_fake_sink(aconnector); 6410 if (!sink) 6411 return stream; 6412 } else { 6413 sink = aconnector->dc_sink; 6414 dc_sink_retain(sink); 6415 } 6416 6417 stream = dc_create_stream_for_sink(sink); 6418 6419 if (stream == NULL) { 6420 DRM_ERROR("Failed to create stream for sink!\n"); 6421 goto finish; 6422 } 6423 6424 stream->dm_stream_context = aconnector; 6425 6426 stream->timing.flags.LTE_340MCSC_SCRAMBLE = 6427 drm_connector->display_info.hdmi.scdc.scrambling.low_rates; 6428 6429 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { 6430 /* Search for preferred mode */ 6431 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { 6432 native_mode_found = true; 6433 break; 6434 } 6435 } 6436 if (!native_mode_found) 6437 preferred_mode = list_first_entry_or_null( 6438 &aconnector->base.modes, 6439 struct drm_display_mode, 6440 head); 6441 6442 mode_refresh = drm_mode_vrefresh(&mode); 6443 6444 if (preferred_mode == NULL) { 6445 /* 6446 * This may not be an error, the use case is when we have no 6447 * usermode calls to reset and set mode upon hotplug. In this 6448 * case, we call set mode ourselves to restore the previous mode 6449 * and the modelist may not be filled in in time. 6450 */ 6451 DRM_DEBUG_DRIVER("No preferred mode found\n"); 6452 } else { 6453 recalculate_timing = is_freesync_video_mode(&mode, aconnector); 6454 if (recalculate_timing) { 6455 freesync_mode = get_highest_refresh_rate_mode(aconnector, false); 6456 drm_mode_copy(&saved_mode, &mode); 6457 drm_mode_copy(&mode, freesync_mode); 6458 } else { 6459 decide_crtc_timing_for_drm_display_mode( 6460 &mode, preferred_mode, scale); 6461 6462 preferred_refresh = drm_mode_vrefresh(preferred_mode); 6463 } 6464 } 6465 6466 if (recalculate_timing) 6467 drm_mode_set_crtcinfo(&saved_mode, 0); 6468 else if (!dm_state) 6469 drm_mode_set_crtcinfo(&mode, 0); 6470 6471 /* 6472 * If scaling is enabled and refresh rate didn't change 6473 * we copy the vic and polarities of the old timings 6474 */ 6475 if (!scale || mode_refresh != preferred_refresh) 6476 fill_stream_properties_from_drm_display_mode( 6477 stream, &mode, &aconnector->base, con_state, NULL, 6478 requested_bpc); 6479 else 6480 fill_stream_properties_from_drm_display_mode( 6481 stream, &mode, &aconnector->base, con_state, old_stream, 6482 requested_bpc); 6483 6484 #if defined(CONFIG_DRM_AMD_DC_DCN) 6485 /* SST DSC determination policy */ 6486 update_dsc_caps(aconnector, sink, stream, &dsc_caps); 6487 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) 6488 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps); 6489 #endif 6490 6491 update_stream_scaling_settings(&mode, dm_state, stream); 6492 6493 fill_audio_info( 6494 &stream->audio_info, 6495 drm_connector, 6496 sink); 6497 6498 update_stream_signal(stream, sink); 6499 6500 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6501 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); 6502 6503 if (stream->link->psr_settings.psr_feature_enabled) { 6504 // 6505 // should decide stream 
support vsc sdp colorimetry capability 6506 // before building vsc info packet 6507 // 6508 stream->use_vsc_sdp_for_colorimetry = false; 6509 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 6510 stream->use_vsc_sdp_for_colorimetry = 6511 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; 6512 } else { 6513 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) 6514 stream->use_vsc_sdp_for_colorimetry = true; 6515 } 6516 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space); 6517 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; 6518 6519 } 6520 finish: 6521 dc_sink_release(sink); 6522 6523 return stream; 6524 } 6525 6526 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) 6527 { 6528 drm_crtc_cleanup(crtc); 6529 kfree(crtc); 6530 } 6531 6532 static void dm_crtc_destroy_state(struct drm_crtc *crtc, 6533 struct drm_crtc_state *state) 6534 { 6535 struct dm_crtc_state *cur = to_dm_crtc_state(state); 6536 6537 /* TODO Destroy dc_stream objects are stream object is flattened */ 6538 if (cur->stream) 6539 dc_stream_release(cur->stream); 6540 6541 6542 __drm_atomic_helper_crtc_destroy_state(state); 6543 6544 6545 kfree(state); 6546 } 6547 6548 static void dm_crtc_reset_state(struct drm_crtc *crtc) 6549 { 6550 struct dm_crtc_state *state; 6551 6552 if (crtc->state) 6553 dm_crtc_destroy_state(crtc, crtc->state); 6554 6555 state = kzalloc(sizeof(*state), GFP_KERNEL); 6556 if (WARN_ON(!state)) 6557 return; 6558 6559 __drm_atomic_helper_crtc_reset(crtc, &state->base); 6560 } 6561 6562 static struct drm_crtc_state * 6563 dm_crtc_duplicate_state(struct drm_crtc *crtc) 6564 { 6565 struct dm_crtc_state *state, *cur; 6566 6567 cur = to_dm_crtc_state(crtc->state); 6568 6569 if (WARN_ON(!crtc->state)) 6570 return NULL; 6571 6572 state = kzalloc(sizeof(*state), GFP_KERNEL); 6573 if (!state) 6574 return NULL; 6575 6576 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); 6577 6578 if (cur->stream) { 6579 state->stream = cur->stream; 6580 dc_stream_retain(state->stream); 6581 } 6582 6583 state->active_planes = cur->active_planes; 6584 state->vrr_infopacket = cur->vrr_infopacket; 6585 state->abm_level = cur->abm_level; 6586 state->vrr_supported = cur->vrr_supported; 6587 state->freesync_config = cur->freesync_config; 6588 state->cm_has_degamma = cur->cm_has_degamma; 6589 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; 6590 state->force_dpms_off = cur->force_dpms_off; 6591 /* TODO Duplicate dc_stream after objects are stream object is flattened */ 6592 6593 return &state->base; 6594 } 6595 6596 #ifdef CONFIG_DEBUG_FS 6597 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) 6598 { 6599 crtc_debugfs_init(crtc); 6600 6601 return 0; 6602 } 6603 #endif 6604 6605 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) 6606 { 6607 enum dc_irq_source irq_source; 6608 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 6609 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 6610 int rc; 6611 6612 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; 6613 6614 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; 6615 6616 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", 6617 acrtc->crtc_id, enable ? 
"en" : "dis", rc); 6618 return rc; 6619 } 6620 6621 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) 6622 { 6623 enum dc_irq_source irq_source; 6624 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 6625 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 6626 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); 6627 struct amdgpu_display_manager *dm = &adev->dm; 6628 struct vblank_control_work *work; 6629 int rc = 0; 6630 6631 if (enable) { 6632 /* vblank irq on -> Only need vupdate irq in vrr mode */ 6633 if (amdgpu_dm_vrr_active(acrtc_state)) 6634 rc = dm_set_vupdate_irq(crtc, true); 6635 } else { 6636 /* vblank irq off -> vupdate irq off */ 6637 rc = dm_set_vupdate_irq(crtc, false); 6638 } 6639 6640 if (rc) 6641 return rc; 6642 6643 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; 6644 6645 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) 6646 return -EBUSY; 6647 6648 if (amdgpu_in_reset(adev)) 6649 return 0; 6650 6651 if (dm->vblank_control_workqueue) { 6652 work = kzalloc(sizeof(*work), GFP_ATOMIC); 6653 if (!work) 6654 return -ENOMEM; 6655 6656 INIT_WORK(&work->work, vblank_control_worker); 6657 work->dm = dm; 6658 work->acrtc = acrtc; 6659 work->enable = enable; 6660 6661 if (acrtc_state->stream) { 6662 dc_stream_retain(acrtc_state->stream); 6663 work->stream = acrtc_state->stream; 6664 } 6665 6666 queue_work(dm->vblank_control_workqueue, &work->work); 6667 } 6668 6669 return 0; 6670 } 6671 6672 static int dm_enable_vblank(struct drm_crtc *crtc) 6673 { 6674 return dm_set_vblank(crtc, true); 6675 } 6676 6677 static void dm_disable_vblank(struct drm_crtc *crtc) 6678 { 6679 dm_set_vblank(crtc, false); 6680 } 6681 6682 /* Implemented only the options currently availible for the driver */ 6683 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { 6684 .reset = dm_crtc_reset_state, 6685 .destroy = amdgpu_dm_crtc_destroy, 6686 .set_config = drm_atomic_helper_set_config, 6687 .page_flip = drm_atomic_helper_page_flip, 6688 .atomic_duplicate_state = dm_crtc_duplicate_state, 6689 .atomic_destroy_state = dm_crtc_destroy_state, 6690 .set_crc_source = amdgpu_dm_crtc_set_crc_source, 6691 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source, 6692 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources, 6693 .get_vblank_counter = amdgpu_get_vblank_counter_kms, 6694 .enable_vblank = dm_enable_vblank, 6695 .disable_vblank = dm_disable_vblank, 6696 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, 6697 #if defined(CONFIG_DEBUG_FS) 6698 .late_register = amdgpu_dm_crtc_late_register, 6699 #endif 6700 }; 6701 6702 static enum drm_connector_status 6703 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) 6704 { 6705 bool connected; 6706 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6707 6708 /* 6709 * Notes: 6710 * 1. This interface is NOT called in context of HPD irq. 6711 * 2. This interface *is called* in context of user-mode ioctl. Which 6712 * makes it a bad place for *any* MST-related activity. 6713 */ 6714 6715 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && 6716 !aconnector->fake_enable) 6717 connected = (aconnector->dc_sink != NULL); 6718 else 6719 connected = (aconnector->base.force == DRM_FORCE_ON); 6720 6721 update_subconnector_property(aconnector); 6722 6723 return (connected ? 
connector_status_connected : 6724 connector_status_disconnected); 6725 } 6726 6727 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, 6728 struct drm_connector_state *connector_state, 6729 struct drm_property *property, 6730 uint64_t val) 6731 { 6732 struct drm_device *dev = connector->dev; 6733 struct amdgpu_device *adev = drm_to_adev(dev); 6734 struct dm_connector_state *dm_old_state = 6735 to_dm_connector_state(connector->state); 6736 struct dm_connector_state *dm_new_state = 6737 to_dm_connector_state(connector_state); 6738 6739 int ret = -EINVAL; 6740 6741 if (property == dev->mode_config.scaling_mode_property) { 6742 enum amdgpu_rmx_type rmx_type; 6743 6744 switch (val) { 6745 case DRM_MODE_SCALE_CENTER: 6746 rmx_type = RMX_CENTER; 6747 break; 6748 case DRM_MODE_SCALE_ASPECT: 6749 rmx_type = RMX_ASPECT; 6750 break; 6751 case DRM_MODE_SCALE_FULLSCREEN: 6752 rmx_type = RMX_FULL; 6753 break; 6754 case DRM_MODE_SCALE_NONE: 6755 default: 6756 rmx_type = RMX_OFF; 6757 break; 6758 } 6759 6760 if (dm_old_state->scaling == rmx_type) 6761 return 0; 6762 6763 dm_new_state->scaling = rmx_type; 6764 ret = 0; 6765 } else if (property == adev->mode_info.underscan_hborder_property) { 6766 dm_new_state->underscan_hborder = val; 6767 ret = 0; 6768 } else if (property == adev->mode_info.underscan_vborder_property) { 6769 dm_new_state->underscan_vborder = val; 6770 ret = 0; 6771 } else if (property == adev->mode_info.underscan_property) { 6772 dm_new_state->underscan_enable = val; 6773 ret = 0; 6774 } else if (property == adev->mode_info.abm_level_property) { 6775 dm_new_state->abm_level = val; 6776 ret = 0; 6777 } 6778 6779 return ret; 6780 } 6781 6782 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, 6783 const struct drm_connector_state *state, 6784 struct drm_property *property, 6785 uint64_t *val) 6786 { 6787 struct drm_device *dev = connector->dev; 6788 struct amdgpu_device *adev = drm_to_adev(dev); 6789 struct dm_connector_state *dm_state = 6790 to_dm_connector_state(state); 6791 int ret = -EINVAL; 6792 6793 if (property == dev->mode_config.scaling_mode_property) { 6794 switch (dm_state->scaling) { 6795 case RMX_CENTER: 6796 *val = DRM_MODE_SCALE_CENTER; 6797 break; 6798 case RMX_ASPECT: 6799 *val = DRM_MODE_SCALE_ASPECT; 6800 break; 6801 case RMX_FULL: 6802 *val = DRM_MODE_SCALE_FULLSCREEN; 6803 break; 6804 case RMX_OFF: 6805 default: 6806 *val = DRM_MODE_SCALE_NONE; 6807 break; 6808 } 6809 ret = 0; 6810 } else if (property == adev->mode_info.underscan_hborder_property) { 6811 *val = dm_state->underscan_hborder; 6812 ret = 0; 6813 } else if (property == adev->mode_info.underscan_vborder_property) { 6814 *val = dm_state->underscan_vborder; 6815 ret = 0; 6816 } else if (property == adev->mode_info.underscan_property) { 6817 *val = dm_state->underscan_enable; 6818 ret = 0; 6819 } else if (property == adev->mode_info.abm_level_property) { 6820 *val = dm_state->abm_level; 6821 ret = 0; 6822 } 6823 6824 return ret; 6825 } 6826 6827 static void amdgpu_dm_connector_unregister(struct drm_connector *connector) 6828 { 6829 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 6830 6831 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); 6832 } 6833 6834 static void amdgpu_dm_connector_destroy(struct drm_connector *connector) 6835 { 6836 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6837 const struct dc_link *link = aconnector->dc_link; 6838 struct amdgpu_device *adev = 
drm_to_adev(connector->dev); 6839 struct amdgpu_display_manager *dm = &adev->dm; 6840 int i; 6841 6842 /* 6843 * Call only if mst_mgr was initialized before, since it's not done 6844 * for all connector types. 6845 */ 6846 if (aconnector->mst_mgr.dev) 6847 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); 6848 6849 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 6850 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 6851 for (i = 0; i < dm->num_of_edps; i++) { 6852 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) { 6853 backlight_device_unregister(dm->backlight_dev[i]); 6854 dm->backlight_dev[i] = NULL; 6855 } 6856 } 6857 #endif 6858 6859 if (aconnector->dc_em_sink) 6860 dc_sink_release(aconnector->dc_em_sink); 6861 aconnector->dc_em_sink = NULL; 6862 if (aconnector->dc_sink) 6863 dc_sink_release(aconnector->dc_sink); 6864 aconnector->dc_sink = NULL; 6865 6866 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 6867 drm_connector_unregister(connector); 6868 drm_connector_cleanup(connector); 6869 if (aconnector->i2c) { 6870 i2c_del_adapter(&aconnector->i2c->base); 6871 kfree(aconnector->i2c); 6872 } 6873 kfree(aconnector->dm_dp_aux.aux.name); 6874 6875 kfree(connector); 6876 } 6877 6878 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) 6879 { 6880 struct dm_connector_state *state = 6881 to_dm_connector_state(connector->state); 6882 6883 if (connector->state) 6884 __drm_atomic_helper_connector_destroy_state(connector->state); 6885 6886 kfree(state); 6887 6888 state = kzalloc(sizeof(*state), GFP_KERNEL); 6889 6890 if (state) { 6891 state->scaling = RMX_OFF; 6892 state->underscan_enable = false; 6893 state->underscan_hborder = 0; 6894 state->underscan_vborder = 0; 6895 state->base.max_requested_bpc = 8; 6896 state->vcpi_slots = 0; 6897 state->pbn = 0; 6898 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 6899 state->abm_level = amdgpu_dm_abm_level; 6900 6901 __drm_atomic_helper_connector_reset(connector, &state->base); 6902 } 6903 } 6904 6905 struct drm_connector_state * 6906 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) 6907 { 6908 struct dm_connector_state *state = 6909 to_dm_connector_state(connector->state); 6910 6911 struct dm_connector_state *new_state = 6912 kmemdup(state, sizeof(*state), GFP_KERNEL); 6913 6914 if (!new_state) 6915 return NULL; 6916 6917 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); 6918 6919 new_state->freesync_capable = state->freesync_capable; 6920 new_state->abm_level = state->abm_level; 6921 new_state->scaling = state->scaling; 6922 new_state->underscan_enable = state->underscan_enable; 6923 new_state->underscan_hborder = state->underscan_hborder; 6924 new_state->underscan_vborder = state->underscan_vborder; 6925 new_state->vcpi_slots = state->vcpi_slots; 6926 new_state->pbn = state->pbn; 6927 return &new_state->base; 6928 } 6929 6930 static int 6931 amdgpu_dm_connector_late_register(struct drm_connector *connector) 6932 { 6933 struct amdgpu_dm_connector *amdgpu_dm_connector = 6934 to_amdgpu_dm_connector(connector); 6935 int r; 6936 6937 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 6938 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 6939 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; 6940 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); 6941 if (r) 6942 return r; 6943 } 6944 6945 #if defined(CONFIG_DEBUG_FS) 6946 connector_debugfs_init(amdgpu_dm_connector); 6947 #endif 6948 6949 return 0; 6950 } 6951
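/* Connector vtable: wires up the reset/duplicate/destroy state helpers above together with the standard "scaling mode" property and amdgpu's underscan and ABM level properties handled by the atomic_set/get_property hooks. */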
6952 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { 6953 .reset = amdgpu_dm_connector_funcs_reset, 6954 .detect = amdgpu_dm_connector_detect, 6955 .fill_modes = drm_helper_probe_single_connector_modes, 6956 .destroy = amdgpu_dm_connector_destroy, 6957 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 6958 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 6959 .atomic_set_property = amdgpu_dm_connector_atomic_set_property, 6960 .atomic_get_property = amdgpu_dm_connector_atomic_get_property, 6961 .late_register = amdgpu_dm_connector_late_register, 6962 .early_unregister = amdgpu_dm_connector_unregister 6963 }; 6964 6965 static int get_modes(struct drm_connector *connector) 6966 { 6967 return amdgpu_dm_connector_get_modes(connector); 6968 } 6969 6970 static void create_eml_sink(struct amdgpu_dm_connector *aconnector) 6971 { 6972 struct dc_sink_init_data init_params = { 6973 .link = aconnector->dc_link, 6974 .sink_signal = SIGNAL_TYPE_VIRTUAL 6975 }; 6976 struct edid *edid; 6977 6978 if (!aconnector->base.edid_blob_ptr) { 6979 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n", 6980 aconnector->base.name); 6981 6982 aconnector->base.force = DRM_FORCE_OFF; 6983 aconnector->base.override_edid = false; 6984 return; 6985 } 6986 6987 edid = (struct edid *) aconnector->base.edid_blob_ptr->data; 6988 6989 aconnector->edid = edid; 6990 6991 aconnector->dc_em_sink = dc_link_add_remote_sink( 6992 aconnector->dc_link, 6993 (uint8_t *)edid, 6994 (edid->extensions + 1) * EDID_LENGTH, 6995 &init_params); 6996 6997 if (aconnector->base.force == DRM_FORCE_ON) { 6998 aconnector->dc_sink = aconnector->dc_link->local_sink ? 6999 aconnector->dc_link->local_sink : 7000 aconnector->dc_em_sink; 7001 dc_sink_retain(aconnector->dc_sink); 7002 } 7003 } 7004 7005 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) 7006 { 7007 struct dc_link *link = (struct dc_link *)aconnector->dc_link; 7008 7009 /* 7010 * In case of headless boot with force on for DP managed connector 7011 * Those settings have to be != 0 to get initial modeset 7012 */ 7013 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { 7014 link->verified_link_cap.lane_count = LANE_COUNT_FOUR; 7015 link->verified_link_cap.link_rate = LINK_RATE_HIGH2; 7016 } 7017 7018 7019 aconnector->base.override_edid = true; 7020 create_eml_sink(aconnector); 7021 } 7022 7023 struct dc_stream_state * 7024 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, 7025 const struct drm_display_mode *drm_mode, 7026 const struct dm_connector_state *dm_state, 7027 const struct dc_stream_state *old_stream) 7028 { 7029 struct drm_connector *connector = &aconnector->base; 7030 struct amdgpu_device *adev = drm_to_adev(connector->dev); 7031 struct dc_stream_state *stream; 7032 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; 7033 int requested_bpc = drm_state ? 
drm_state->max_requested_bpc : 8; 7034 enum dc_status dc_result = DC_OK; 7035 7036 do { 7037 stream = create_stream_for_sink(aconnector, drm_mode, 7038 dm_state, old_stream, 7039 requested_bpc); 7040 if (stream == NULL) { 7041 DRM_ERROR("Failed to create stream for sink!\n"); 7042 break; 7043 } 7044 7045 dc_result = dc_validate_stream(adev->dm.dc, stream); 7046 7047 if (dc_result != DC_OK) { 7048 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n", 7049 drm_mode->hdisplay, 7050 drm_mode->vdisplay, 7051 drm_mode->clock, 7052 dc_result, 7053 dc_status_to_str(dc_result)); 7054 7055 dc_stream_release(stream); 7056 stream = NULL; 7057 requested_bpc -= 2; /* lower bpc to retry validation */ 7058 } 7059 7060 } while (stream == NULL && requested_bpc >= 6); 7061 7062 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) { 7063 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n"); 7064 7065 aconnector->force_yuv420_output = true; 7066 stream = create_validate_stream_for_sink(aconnector, drm_mode, 7067 dm_state, old_stream); 7068 aconnector->force_yuv420_output = false; 7069 } 7070 7071 return stream; 7072 } 7073 7074 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, 7075 struct drm_display_mode *mode) 7076 { 7077 int result = MODE_ERROR; 7078 struct dc_sink *dc_sink; 7079 /* TODO: Unhardcode stream count */ 7080 struct dc_stream_state *stream; 7081 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7082 7083 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 7084 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) 7085 return result; 7086 7087 /* 7088 * Only run this the first time mode_valid is called to initialize 7089 * EDID mgmt 7090 */ 7091 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && 7092 !aconnector->dc_em_sink) 7093 handle_edid_mgmt(aconnector); 7094 7095 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink; 7096 7097 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL && 7098 aconnector->base.force != DRM_FORCE_ON) { 7099 DRM_ERROR("dc_sink is NULL!\n"); 7100 goto fail; 7101 } 7102 7103 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL); 7104 if (stream) { 7105 dc_stream_release(stream); 7106 result = MODE_OK; 7107 } 7108 7109 fail: 7110 /* TODO: error handling*/ 7111 return result; 7112 } 7113 7114 static int fill_hdr_info_packet(const struct drm_connector_state *state, 7115 struct dc_info_packet *out) 7116 { 7117 struct hdmi_drm_infoframe frame; 7118 unsigned char buf[30]; /* 26 + 4 */ 7119 ssize_t len; 7120 int ret, i; 7121 7122 memset(out, 0, sizeof(*out)); 7123 7124 if (!state->hdr_output_metadata) 7125 return 0; 7126 7127 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state); 7128 if (ret) 7129 return ret; 7130 7131 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf)); 7132 if (len < 0) 7133 return (int)len; 7134 7135 /* Static metadata is a fixed 26 bytes + 4 byte header. */ 7136 if (len != 30) 7137 return -EINVAL; 7138 7139 /* Prepare the infopacket for DC.
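 * hdmi_drm_infoframe_pack_only() above produces a 4 byte header (type,
 * version, length, checksum) followed by the 26 byte static metadata
 * payload, which is why len is checked against 30 and why the copy into
 * sb[] below starts at buf[4].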
*/ 7140 switch (state->connector->connector_type) { 7141 case DRM_MODE_CONNECTOR_HDMIA: 7142 out->hb0 = 0x87; /* type */ 7143 out->hb1 = 0x01; /* version */ 7144 out->hb2 = 0x1A; /* length */ 7145 out->sb[0] = buf[3]; /* checksum */ 7146 i = 1; 7147 break; 7148 7149 case DRM_MODE_CONNECTOR_DisplayPort: 7150 case DRM_MODE_CONNECTOR_eDP: 7151 out->hb0 = 0x00; /* sdp id, zero */ 7152 out->hb1 = 0x87; /* type */ 7153 out->hb2 = 0x1D; /* payload len - 1 */ 7154 out->hb3 = (0x13 << 2); /* sdp version */ 7155 out->sb[0] = 0x01; /* version */ 7156 out->sb[1] = 0x1A; /* length */ 7157 i = 2; 7158 break; 7159 7160 default: 7161 return -EINVAL; 7162 } 7163 7164 memcpy(&out->sb[i], &buf[4], 26); 7165 out->valid = true; 7166 7167 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, 7168 sizeof(out->sb), false); 7169 7170 return 0; 7171 } 7172 7173 static int 7174 amdgpu_dm_connector_atomic_check(struct drm_connector *conn, 7175 struct drm_atomic_state *state) 7176 { 7177 struct drm_connector_state *new_con_state = 7178 drm_atomic_get_new_connector_state(state, conn); 7179 struct drm_connector_state *old_con_state = 7180 drm_atomic_get_old_connector_state(state, conn); 7181 struct drm_crtc *crtc = new_con_state->crtc; 7182 struct drm_crtc_state *new_crtc_state; 7183 int ret; 7184 7185 trace_amdgpu_dm_connector_atomic_check(new_con_state); 7186 7187 if (!crtc) 7188 return 0; 7189 7190 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { 7191 struct dc_info_packet hdr_infopacket; 7192 7193 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); 7194 if (ret) 7195 return ret; 7196 7197 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7198 if (IS_ERR(new_crtc_state)) 7199 return PTR_ERR(new_crtc_state); 7200 7201 /* 7202 * DC considers the stream backends changed if the 7203 * static metadata changes. Forcing the modeset also 7204 * gives a simple way for userspace to switch from 7205 * 8bpc to 10bpc when setting the metadata to enter 7206 * or exit HDR. 7207 * 7208 * Changing the static metadata after it's been 7209 * set is permissible, however. So only force a 7210 * modeset if we're entering or exiting HDR. 7211 */ 7212 new_crtc_state->mode_changed = 7213 !old_con_state->hdr_output_metadata || 7214 !new_con_state->hdr_output_metadata; 7215 } 7216 7217 return 0; 7218 } 7219 7220 static const struct drm_connector_helper_funcs 7221 amdgpu_dm_connector_helper_funcs = { 7222 /* 7223 * If hotplugging a second bigger display in FB Con mode, bigger resolution 7224 * modes will be filtered by drm_mode_validate_size(), and those modes 7225 * are missing after the user starts lightdm. So we need to renew the modes 7226 * list in the get_modes callback, not just return the modes count. 7227 */ 7228 .get_modes = get_modes, 7229 .mode_valid = amdgpu_dm_connector_mode_valid, 7230 .atomic_check = amdgpu_dm_connector_atomic_check, 7231 }; 7232 7233 static void dm_crtc_helper_disable(struct drm_crtc *crtc) 7234 { 7235 } 7236 7237 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) 7238 { 7239 struct drm_atomic_state *state = new_crtc_state->state; 7240 struct drm_plane *plane; 7241 int num_active = 0; 7242 7243 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) { 7244 struct drm_plane_state *new_plane_state; 7245 7246 /* Cursor planes are "fake".
*/ 7247 if (plane->type == DRM_PLANE_TYPE_CURSOR) 7248 continue; 7249 7250 new_plane_state = drm_atomic_get_new_plane_state(state, plane); 7251 7252 if (!new_plane_state) { 7253 /* 7254 * The plane is enabled on the CRTC and hasn't changed 7255 * state. This means that it previously passed 7256 * validation and is therefore enabled. 7257 */ 7258 num_active += 1; 7259 continue; 7260 } 7261 7262 /* We need a framebuffer to be considered enabled. */ 7263 num_active += (new_plane_state->fb != NULL); 7264 } 7265 7266 return num_active; 7267 } 7268 7269 static void dm_update_crtc_active_planes(struct drm_crtc *crtc, 7270 struct drm_crtc_state *new_crtc_state) 7271 { 7272 struct dm_crtc_state *dm_new_crtc_state = 7273 to_dm_crtc_state(new_crtc_state); 7274 7275 dm_new_crtc_state->active_planes = 0; 7276 7277 if (!dm_new_crtc_state->stream) 7278 return; 7279 7280 dm_new_crtc_state->active_planes = 7281 count_crtc_active_planes(new_crtc_state); 7282 } 7283 7284 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, 7285 struct drm_atomic_state *state) 7286 { 7287 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, 7288 crtc); 7289 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 7290 struct dc *dc = adev->dm.dc; 7291 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 7292 int ret = -EINVAL; 7293 7294 trace_amdgpu_dm_crtc_atomic_check(crtc_state); 7295 7296 dm_update_crtc_active_planes(crtc, crtc_state); 7297 7298 if (WARN_ON(unlikely(!dm_crtc_state->stream && 7299 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) { 7300 return ret; 7301 } 7302 7303 /* 7304 * We require the primary plane to be enabled whenever the CRTC is, otherwise 7305 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other 7306 * planes are disabled, which is not supported by the hardware. And there is legacy 7307 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7308 */ 7309 if (crtc_state->enable && 7310 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) { 7311 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n"); 7312 return -EINVAL; 7313 } 7314 7315 /* In some use cases, like reset, no stream is attached */ 7316 if (!dm_crtc_state->stream) 7317 return 0; 7318 7319 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) 7320 return 0; 7321 7322 DRM_DEBUG_ATOMIC("Failed DC stream validation\n"); 7323 return ret; 7324 } 7325 7326 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, 7327 const struct drm_display_mode *mode, 7328 struct drm_display_mode *adjusted_mode) 7329 { 7330 return true; 7331 } 7332 7333 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { 7334 .disable = dm_crtc_helper_disable, 7335 .atomic_check = dm_crtc_helper_atomic_check, 7336 .mode_fixup = dm_crtc_helper_mode_fixup, 7337 .get_scanout_position = amdgpu_crtc_get_scanout_position, 7338 }; 7339 7340 static void dm_encoder_helper_disable(struct drm_encoder *encoder) 7341 { 7342 7343 } 7344 7345 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth) 7346 { 7347 switch (display_color_depth) { 7348 case COLOR_DEPTH_666: 7349 return 6; 7350 case COLOR_DEPTH_888: 7351 return 8; 7352 case COLOR_DEPTH_101010: 7353 return 10; 7354 case COLOR_DEPTH_121212: 7355 return 12; 7356 case COLOR_DEPTH_141414: 7357 return 14; 7358 case COLOR_DEPTH_161616: 7359 return 16; 7360 default: 7361 break; 7362 } 7363 return 0; 7364 } 7365 7366 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, 7367 struct drm_crtc_state *crtc_state, 7368 struct drm_connector_state *conn_state) 7369 { 7370 struct drm_atomic_state *state = crtc_state->state; 7371 struct drm_connector *connector = conn_state->connector; 7372 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7373 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state); 7374 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; 7375 struct drm_dp_mst_topology_mgr *mst_mgr; 7376 struct drm_dp_mst_port *mst_port; 7377 enum dc_color_depth color_depth; 7378 int clock, bpp = 0; 7379 bool is_y420 = false; 7380 7381 if (!aconnector->port || !aconnector->dc_sink) 7382 return 0; 7383 7384 mst_port = aconnector->port; 7385 mst_mgr = &aconnector->mst_port->mst_mgr; 7386 7387 if (!crtc_state->connectors_changed && !crtc_state->mode_changed) 7388 return 0; 7389 7390 if (!state->duplicated) { 7391 int max_bpc = conn_state->max_requested_bpc; 7392 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && 7393 aconnector->force_yuv420_output; 7394 color_depth = convert_color_depth_from_display_info(connector, 7395 is_y420, 7396 max_bpc); 7397 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; 7398 clock = adjusted_mode->clock; 7399 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); 7400 } 7401 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state, 7402 mst_mgr, 7403 mst_port, 7404 dm_new_connector_state->pbn, 7405 dm_mst_get_pbn_divider(aconnector->dc_link)); 7406 if (dm_new_connector_state->vcpi_slots < 0) { 7407 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); 7408 return dm_new_connector_state->vcpi_slots; 7409 } 7410 return 0; 7411 } 7412 7413 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { 7414 .disable = dm_encoder_helper_disable, 7415 .atomic_check 
= dm_encoder_helper_atomic_check 7416 }; 7417 7418 #if defined(CONFIG_DRM_AMD_DC_DCN) 7419 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, 7420 struct dc_state *dc_state, 7421 struct dsc_mst_fairness_vars *vars) 7422 { 7423 struct dc_stream_state *stream = NULL; 7424 struct drm_connector *connector; 7425 struct drm_connector_state *new_con_state; 7426 struct amdgpu_dm_connector *aconnector; 7427 struct dm_connector_state *dm_conn_state; 7428 int i, j; 7429 int vcpi, pbn_div, pbn, slot_num = 0; 7430 7431 for_each_new_connector_in_state(state, connector, new_con_state, i) { 7432 7433 aconnector = to_amdgpu_dm_connector(connector); 7434 7435 if (!aconnector->port) 7436 continue; 7437 7438 if (!new_con_state || !new_con_state->crtc) 7439 continue; 7440 7441 dm_conn_state = to_dm_connector_state(new_con_state); 7442 7443 for (j = 0; j < dc_state->stream_count; j++) { 7444 stream = dc_state->streams[j]; 7445 if (!stream) 7446 continue; 7447 7448 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector) 7449 break; 7450 7451 stream = NULL; 7452 } 7453 7454 if (!stream) 7455 continue; 7456 7457 pbn_div = dm_mst_get_pbn_divider(stream->link); 7458 /* pbn is calculated by compute_mst_dsc_configs_for_state*/ 7459 for (j = 0; j < dc_state->stream_count; j++) { 7460 if (vars[j].aconnector == aconnector) { 7461 pbn = vars[j].pbn; 7462 break; 7463 } 7464 } 7465 7466 if (j == dc_state->stream_count) 7467 continue; 7468 7469 slot_num = DIV_ROUND_UP(pbn, pbn_div); 7470 7471 if (stream->timing.flags.DSC != 1) { 7472 dm_conn_state->pbn = pbn; 7473 dm_conn_state->vcpi_slots = slot_num; 7474 7475 drm_dp_mst_atomic_enable_dsc(state, 7476 aconnector->port, 7477 dm_conn_state->pbn, 7478 0, 7479 false); 7480 continue; 7481 } 7482 7483 vcpi = drm_dp_mst_atomic_enable_dsc(state, 7484 aconnector->port, 7485 pbn, pbn_div, 7486 true); 7487 if (vcpi < 0) 7488 return vcpi; 7489 7490 dm_conn_state->pbn = pbn; 7491 dm_conn_state->vcpi_slots = vcpi; 7492 } 7493 return 0; 7494 } 7495 #endif 7496 7497 static void dm_drm_plane_reset(struct drm_plane *plane) 7498 { 7499 struct dm_plane_state *amdgpu_state = NULL; 7500 7501 if (plane->state) 7502 plane->funcs->atomic_destroy_state(plane, plane->state); 7503 7504 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); 7505 WARN_ON(amdgpu_state == NULL); 7506 7507 if (amdgpu_state) 7508 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); 7509 } 7510 7511 static struct drm_plane_state * 7512 dm_drm_plane_duplicate_state(struct drm_plane *plane) 7513 { 7514 struct dm_plane_state *dm_plane_state, *old_dm_plane_state; 7515 7516 old_dm_plane_state = to_dm_plane_state(plane->state); 7517 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); 7518 if (!dm_plane_state) 7519 return NULL; 7520 7521 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); 7522 7523 if (old_dm_plane_state->dc_state) { 7524 dm_plane_state->dc_state = old_dm_plane_state->dc_state; 7525 dc_plane_state_retain(dm_plane_state->dc_state); 7526 } 7527 7528 return &dm_plane_state->base; 7529 } 7530 7531 static void dm_drm_plane_destroy_state(struct drm_plane *plane, 7532 struct drm_plane_state *state) 7533 { 7534 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 7535 7536 if (dm_plane_state->dc_state) 7537 dc_plane_state_release(dm_plane_state->dc_state); 7538 7539 drm_atomic_helper_plane_destroy_state(plane, state); 7540 } 7541 7542 static const struct drm_plane_funcs dm_plane_funcs = { 7543 .update_plane = 
drm_atomic_helper_update_plane, 7544 .disable_plane = drm_atomic_helper_disable_plane, 7545 .destroy = drm_primary_helper_destroy, 7546 .reset = dm_drm_plane_reset, 7547 .atomic_duplicate_state = dm_drm_plane_duplicate_state, 7548 .atomic_destroy_state = dm_drm_plane_destroy_state, 7549 .format_mod_supported = dm_plane_format_mod_supported, 7550 }; 7551 7552 static int dm_plane_helper_prepare_fb(struct drm_plane *plane, 7553 struct drm_plane_state *new_state) 7554 { 7555 struct amdgpu_framebuffer *afb; 7556 struct drm_gem_object *obj; 7557 struct amdgpu_device *adev; 7558 struct amdgpu_bo *rbo; 7559 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; 7560 uint32_t domain; 7561 int r; 7562 7563 if (!new_state->fb) { 7564 DRM_DEBUG_KMS("No FB bound\n"); 7565 return 0; 7566 } 7567 7568 afb = to_amdgpu_framebuffer(new_state->fb); 7569 obj = new_state->fb->obj[0]; 7570 rbo = gem_to_amdgpu_bo(obj); 7571 adev = amdgpu_ttm_adev(rbo->tbo.bdev); 7572 7573 r = amdgpu_bo_reserve(rbo, true); 7574 if (r) { 7575 dev_err(adev->dev, "fail to reserve bo (%d)\n", r); 7576 return r; 7577 } 7578 7579 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); 7580 if (r) { 7581 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); 7582 goto error_unlock; 7583 } 7584 7585 if (plane->type != DRM_PLANE_TYPE_CURSOR) 7586 domain = amdgpu_display_supported_domains(adev, rbo->flags); 7587 else 7588 domain = AMDGPU_GEM_DOMAIN_VRAM; 7589 7590 r = amdgpu_bo_pin(rbo, domain); 7591 if (unlikely(r != 0)) { 7592 if (r != -ERESTARTSYS) 7593 DRM_ERROR("Failed to pin framebuffer with error %d\n", r); 7594 goto error_unlock; 7595 } 7596 7597 r = amdgpu_ttm_alloc_gart(&rbo->tbo); 7598 if (unlikely(r != 0)) { 7599 DRM_ERROR("%p bind failed\n", rbo); 7600 goto error_unpin; 7601 } 7602 7603 r = drm_gem_plane_helper_prepare_fb(plane, new_state); 7604 if (unlikely(r != 0)) 7605 goto error_unpin; 7606 7607 amdgpu_bo_unreserve(rbo); 7608 7609 afb->address = amdgpu_bo_gpu_offset(rbo); 7610 7611 amdgpu_bo_ref(rbo); 7612 7613 /** 7614 * We don't do surface updates on planes that have been newly created, 7615 * but we also don't have the afb->address during atomic check. 7616 * 7617 * Fill in buffer attributes depending on the address here, but only on 7618 * newly created planes since they're not being used by DC yet and this 7619 * won't modify global state. 
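 * force_disable_dcc below mirrors the DC plane state: if DC decided to scan
 * out this surface without DCC, the freshly pinned buffer is described to DC
 * with DCC kept off as well.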
7620 */ 7621 dm_plane_state_old = to_dm_plane_state(plane->state); 7622 dm_plane_state_new = to_dm_plane_state(new_state); 7623 7624 if (dm_plane_state_new->dc_state && 7625 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { 7626 struct dc_plane_state *plane_state = 7627 dm_plane_state_new->dc_state; 7628 bool force_disable_dcc = !plane_state->dcc.enable; 7629 7630 fill_plane_buffer_attributes( 7631 adev, afb, plane_state->format, plane_state->rotation, 7632 afb->tiling_flags, 7633 &plane_state->tiling_info, &plane_state->plane_size, 7634 &plane_state->dcc, &plane_state->address, 7635 afb->tmz_surface, force_disable_dcc); 7636 } 7637 7638 return 0; 7639 7640 error_unpin: 7641 amdgpu_bo_unpin(rbo); 7642 7643 error_unlock: 7644 amdgpu_bo_unreserve(rbo); 7645 return r; 7646 } 7647 7648 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, 7649 struct drm_plane_state *old_state) 7650 { 7651 struct amdgpu_bo *rbo; 7652 int r; 7653 7654 if (!old_state->fb) 7655 return; 7656 7657 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); 7658 r = amdgpu_bo_reserve(rbo, false); 7659 if (unlikely(r)) { 7660 DRM_ERROR("failed to reserve rbo before unpin\n"); 7661 return; 7662 } 7663 7664 amdgpu_bo_unpin(rbo); 7665 amdgpu_bo_unreserve(rbo); 7666 amdgpu_bo_unref(&rbo); 7667 } 7668 7669 static int dm_plane_helper_check_state(struct drm_plane_state *state, 7670 struct drm_crtc_state *new_crtc_state) 7671 { 7672 struct drm_framebuffer *fb = state->fb; 7673 int min_downscale, max_upscale; 7674 int min_scale = 0; 7675 int max_scale = INT_MAX; 7676 7677 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */ 7678 if (fb && state->crtc) { 7679 /* Validate viewport to cover the case when only the position changes */ 7680 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) { 7681 int viewport_width = state->crtc_w; 7682 int viewport_height = state->crtc_h; 7683 7684 if (state->crtc_x < 0) 7685 viewport_width += state->crtc_x; 7686 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay) 7687 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x; 7688 7689 if (state->crtc_y < 0) 7690 viewport_height += state->crtc_y; 7691 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay) 7692 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y; 7693 7694 if (viewport_width < 0 || viewport_height < 0) { 7695 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n"); 7696 return -EINVAL; 7697 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */ 7698 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2); 7699 return -EINVAL; 7700 } else if (viewport_height < MIN_VIEWPORT_SIZE) { 7701 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE); 7702 return -EINVAL; 7703 } 7704 7705 } 7706 7707 /* Get min/max allowed scaling factors from plane caps. */ 7708 get_min_max_dc_plane_scaling(state->crtc->dev, fb, 7709 &min_downscale, &max_upscale); 7710 /* 7711 * Convert to drm convention: 16.16 fixed point, instead of dc's 7712 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's 7713 * dst/src, so min_scale = 1.0 / max_upscale, etc. 
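 * For example, a plane cap of max_upscale = 16000 (16x in DC's
 * 1.0 == 1000 convention) gives min_scale = (1000 << 16) / 16000 = 4096,
 * i.e. 1/16 in 16.16 fixed point.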
7714 */ 7715 min_scale = (1000 << 16) / max_upscale; 7716 max_scale = (1000 << 16) / min_downscale; 7717 } 7718 7719 return drm_atomic_helper_check_plane_state( 7720 state, new_crtc_state, min_scale, max_scale, true, true); 7721 } 7722 7723 static int dm_plane_atomic_check(struct drm_plane *plane, 7724 struct drm_atomic_state *state) 7725 { 7726 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, 7727 plane); 7728 struct amdgpu_device *adev = drm_to_adev(plane->dev); 7729 struct dc *dc = adev->dm.dc; 7730 struct dm_plane_state *dm_plane_state; 7731 struct dc_scaling_info scaling_info; 7732 struct drm_crtc_state *new_crtc_state; 7733 int ret; 7734 7735 trace_amdgpu_dm_plane_atomic_check(new_plane_state); 7736 7737 dm_plane_state = to_dm_plane_state(new_plane_state); 7738 7739 if (!dm_plane_state->dc_state) 7740 return 0; 7741 7742 new_crtc_state = 7743 drm_atomic_get_new_crtc_state(state, 7744 new_plane_state->crtc); 7745 if (!new_crtc_state) 7746 return -EINVAL; 7747 7748 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state); 7749 if (ret) 7750 return ret; 7751 7752 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info); 7753 if (ret) 7754 return ret; 7755 7756 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) 7757 return 0; 7758 7759 return -EINVAL; 7760 } 7761 7762 static int dm_plane_atomic_async_check(struct drm_plane *plane, 7763 struct drm_atomic_state *state) 7764 { 7765 /* Only support async updates on cursor planes. */ 7766 if (plane->type != DRM_PLANE_TYPE_CURSOR) 7767 return -EINVAL; 7768 7769 return 0; 7770 } 7771 7772 static void dm_plane_atomic_async_update(struct drm_plane *plane, 7773 struct drm_atomic_state *state) 7774 { 7775 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, 7776 plane); 7777 struct drm_plane_state *old_state = 7778 drm_atomic_get_old_plane_state(state, plane); 7779 7780 trace_amdgpu_dm_atomic_update_cursor(new_state); 7781 7782 swap(plane->state->fb, new_state->fb); 7783 7784 plane->state->src_x = new_state->src_x; 7785 plane->state->src_y = new_state->src_y; 7786 plane->state->src_w = new_state->src_w; 7787 plane->state->src_h = new_state->src_h; 7788 plane->state->crtc_x = new_state->crtc_x; 7789 plane->state->crtc_y = new_state->crtc_y; 7790 plane->state->crtc_w = new_state->crtc_w; 7791 plane->state->crtc_h = new_state->crtc_h; 7792 7793 handle_cursor_update(plane, old_state); 7794 } 7795 7796 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { 7797 .prepare_fb = dm_plane_helper_prepare_fb, 7798 .cleanup_fb = dm_plane_helper_cleanup_fb, 7799 .atomic_check = dm_plane_atomic_check, 7800 .atomic_async_check = dm_plane_atomic_async_check, 7801 .atomic_async_update = dm_plane_atomic_async_update 7802 }; 7803 7804 /* 7805 * TODO: these are currently initialized to rgb formats only. 
7806 * For future use cases we should either initialize them dynamically based on 7807 * plane capabilities, or initialize this array to all formats, so internal drm 7808 * check will succeed, and let DC implement proper check 7809 */ 7810 static const uint32_t rgb_formats[] = { 7811 DRM_FORMAT_XRGB8888, 7812 DRM_FORMAT_ARGB8888, 7813 DRM_FORMAT_RGBA8888, 7814 DRM_FORMAT_XRGB2101010, 7815 DRM_FORMAT_XBGR2101010, 7816 DRM_FORMAT_ARGB2101010, 7817 DRM_FORMAT_ABGR2101010, 7818 DRM_FORMAT_XRGB16161616, 7819 DRM_FORMAT_XBGR16161616, 7820 DRM_FORMAT_ARGB16161616, 7821 DRM_FORMAT_ABGR16161616, 7822 DRM_FORMAT_XBGR8888, 7823 DRM_FORMAT_ABGR8888, 7824 DRM_FORMAT_RGB565, 7825 }; 7826 7827 static const uint32_t overlay_formats[] = { 7828 DRM_FORMAT_XRGB8888, 7829 DRM_FORMAT_ARGB8888, 7830 DRM_FORMAT_RGBA8888, 7831 DRM_FORMAT_XBGR8888, 7832 DRM_FORMAT_ABGR8888, 7833 DRM_FORMAT_RGB565 7834 }; 7835 7836 static const u32 cursor_formats[] = { 7837 DRM_FORMAT_ARGB8888 7838 }; 7839 7840 static int get_plane_formats(const struct drm_plane *plane, 7841 const struct dc_plane_cap *plane_cap, 7842 uint32_t *formats, int max_formats) 7843 { 7844 int i, num_formats = 0; 7845 7846 /* 7847 * TODO: Query support for each group of formats directly from 7848 * DC plane caps. This will require adding more formats to the 7849 * caps list. 7850 */ 7851 7852 switch (plane->type) { 7853 case DRM_PLANE_TYPE_PRIMARY: 7854 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { 7855 if (num_formats >= max_formats) 7856 break; 7857 7858 formats[num_formats++] = rgb_formats[i]; 7859 } 7860 7861 if (plane_cap && plane_cap->pixel_format_support.nv12) 7862 formats[num_formats++] = DRM_FORMAT_NV12; 7863 if (plane_cap && plane_cap->pixel_format_support.p010) 7864 formats[num_formats++] = DRM_FORMAT_P010; 7865 if (plane_cap && plane_cap->pixel_format_support.fp16) { 7866 formats[num_formats++] = DRM_FORMAT_XRGB16161616F; 7867 formats[num_formats++] = DRM_FORMAT_ARGB16161616F; 7868 formats[num_formats++] = DRM_FORMAT_XBGR16161616F; 7869 formats[num_formats++] = DRM_FORMAT_ABGR16161616F; 7870 } 7871 break; 7872 7873 case DRM_PLANE_TYPE_OVERLAY: 7874 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { 7875 if (num_formats >= max_formats) 7876 break; 7877 7878 formats[num_formats++] = overlay_formats[i]; 7879 } 7880 break; 7881 7882 case DRM_PLANE_TYPE_CURSOR: 7883 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { 7884 if (num_formats >= max_formats) 7885 break; 7886 7887 formats[num_formats++] = cursor_formats[i]; 7888 } 7889 break; 7890 } 7891 7892 return num_formats; 7893 } 7894 7895 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, 7896 struct drm_plane *plane, 7897 unsigned long possible_crtcs, 7898 const struct dc_plane_cap *plane_cap) 7899 { 7900 uint32_t formats[32]; 7901 int num_formats; 7902 int res = -EPERM; 7903 unsigned int supported_rotations; 7904 uint64_t *modifiers = NULL; 7905 7906 num_formats = get_plane_formats(plane, plane_cap, formats, 7907 ARRAY_SIZE(formats)); 7908 7909 res = get_plane_modifiers(dm->adev, plane->type, &modifiers); 7910 if (res) 7911 return res; 7912 7913 if (modifiers == NULL) 7914 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true; 7915 7916 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, 7917 &dm_plane_funcs, formats, num_formats, 7918 modifiers, plane->type, NULL); 7919 kfree(modifiers); 7920 if (res) 7921 return res; 7922 7923 if (plane->type == DRM_PLANE_TYPE_OVERLAY && 7924 plane_cap && plane_cap->per_pixel_alpha) { 7925 unsigned int 
blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | 7926 BIT(DRM_MODE_BLEND_PREMULTI) | 7927 BIT(DRM_MODE_BLEND_COVERAGE); 7928 7929 drm_plane_create_alpha_property(plane); 7930 drm_plane_create_blend_mode_property(plane, blend_caps); 7931 } 7932 7933 if (plane->type == DRM_PLANE_TYPE_PRIMARY && 7934 plane_cap && 7935 (plane_cap->pixel_format_support.nv12 || 7936 plane_cap->pixel_format_support.p010)) { 7937 /* This only affects YUV formats. */ 7938 drm_plane_create_color_properties( 7939 plane, 7940 BIT(DRM_COLOR_YCBCR_BT601) | 7941 BIT(DRM_COLOR_YCBCR_BT709) | 7942 BIT(DRM_COLOR_YCBCR_BT2020), 7943 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | 7944 BIT(DRM_COLOR_YCBCR_FULL_RANGE), 7945 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); 7946 } 7947 7948 supported_rotations = 7949 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | 7950 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; 7951 7952 if (dm->adev->asic_type >= CHIP_BONAIRE && 7953 plane->type != DRM_PLANE_TYPE_CURSOR) 7954 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, 7955 supported_rotations); 7956 7957 drm_plane_helper_add(plane, &dm_plane_helper_funcs); 7958 7959 /* Create (reset) the plane state */ 7960 if (plane->funcs->reset) 7961 plane->funcs->reset(plane); 7962 7963 return 0; 7964 } 7965 7966 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, 7967 struct drm_plane *plane, 7968 uint32_t crtc_index) 7969 { 7970 struct amdgpu_crtc *acrtc = NULL; 7971 struct drm_plane *cursor_plane; 7972 7973 int res = -ENOMEM; 7974 7975 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); 7976 if (!cursor_plane) 7977 goto fail; 7978 7979 cursor_plane->type = DRM_PLANE_TYPE_CURSOR; 7980 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL); 7981 7982 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); 7983 if (!acrtc) 7984 goto fail; 7985 7986 res = drm_crtc_init_with_planes( 7987 dm->ddev, 7988 &acrtc->base, 7989 plane, 7990 cursor_plane, 7991 &amdgpu_dm_crtc_funcs, NULL); 7992 7993 if (res) 7994 goto fail; 7995 7996 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); 7997 7998 /* Create (reset) the plane state */ 7999 if (acrtc->base.funcs->reset) 8000 acrtc->base.funcs->reset(&acrtc->base); 8001 8002 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size; 8003 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size; 8004 8005 acrtc->crtc_id = crtc_index; 8006 acrtc->base.enabled = false; 8007 acrtc->otg_inst = -1; 8008 8009 dm->adev->mode_info.crtcs[crtc_index] = acrtc; 8010 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES, 8011 true, MAX_COLOR_LUT_ENTRIES); 8012 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); 8013 8014 return 0; 8015 8016 fail: 8017 kfree(acrtc); 8018 kfree(cursor_plane); 8019 return res; 8020 } 8021 8022 8023 static int to_drm_connector_type(enum signal_type st) 8024 { 8025 switch (st) { 8026 case SIGNAL_TYPE_HDMI_TYPE_A: 8027 return DRM_MODE_CONNECTOR_HDMIA; 8028 case SIGNAL_TYPE_EDP: 8029 return DRM_MODE_CONNECTOR_eDP; 8030 case SIGNAL_TYPE_LVDS: 8031 return DRM_MODE_CONNECTOR_LVDS; 8032 case SIGNAL_TYPE_RGB: 8033 return DRM_MODE_CONNECTOR_VGA; 8034 case SIGNAL_TYPE_DISPLAY_PORT: 8035 case SIGNAL_TYPE_DISPLAY_PORT_MST: 8036 return DRM_MODE_CONNECTOR_DisplayPort; 8037 case SIGNAL_TYPE_DVI_DUAL_LINK: 8038 case SIGNAL_TYPE_DVI_SINGLE_LINK: 8039 return DRM_MODE_CONNECTOR_DVID; 8040 case SIGNAL_TYPE_VIRTUAL: 8041 return DRM_MODE_CONNECTOR_VIRTUAL; 8042 8043 default: 8044 return DRM_MODE_CONNECTOR_Unknown; 8045 } 8046 } 8047 8048 static 
struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) 8049 { 8050 struct drm_encoder *encoder; 8051 8052 /* There is only one encoder per connector */ 8053 drm_connector_for_each_possible_encoder(connector, encoder) 8054 return encoder; 8055 8056 return NULL; 8057 } 8058 8059 static void amdgpu_dm_get_native_mode(struct drm_connector *connector) 8060 { 8061 struct drm_encoder *encoder; 8062 struct amdgpu_encoder *amdgpu_encoder; 8063 8064 encoder = amdgpu_dm_connector_to_encoder(connector); 8065 8066 if (encoder == NULL) 8067 return; 8068 8069 amdgpu_encoder = to_amdgpu_encoder(encoder); 8070 8071 amdgpu_encoder->native_mode.clock = 0; 8072 8073 if (!list_empty(&connector->probed_modes)) { 8074 struct drm_display_mode *preferred_mode = NULL; 8075 8076 list_for_each_entry(preferred_mode, 8077 &connector->probed_modes, 8078 head) { 8079 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) 8080 amdgpu_encoder->native_mode = *preferred_mode; 8081 8082 break; 8083 } 8084 8085 } 8086 } 8087 8088 static struct drm_display_mode * 8089 amdgpu_dm_create_common_mode(struct drm_encoder *encoder, 8090 char *name, 8091 int hdisplay, int vdisplay) 8092 { 8093 struct drm_device *dev = encoder->dev; 8094 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 8095 struct drm_display_mode *mode = NULL; 8096 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 8097 8098 mode = drm_mode_duplicate(dev, native_mode); 8099 8100 if (mode == NULL) 8101 return NULL; 8102 8103 mode->hdisplay = hdisplay; 8104 mode->vdisplay = vdisplay; 8105 mode->type &= ~DRM_MODE_TYPE_PREFERRED; 8106 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); 8107 8108 return mode; 8109 8110 } 8111 8112 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, 8113 struct drm_connector *connector) 8114 { 8115 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 8116 struct drm_display_mode *mode = NULL; 8117 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 8118 struct amdgpu_dm_connector *amdgpu_dm_connector = 8119 to_amdgpu_dm_connector(connector); 8120 int i; 8121 int n; 8122 struct mode_size { 8123 char name[DRM_DISPLAY_MODE_LEN]; 8124 int w; 8125 int h; 8126 } common_modes[] = { 8127 { "640x480", 640, 480}, 8128 { "800x600", 800, 600}, 8129 { "1024x768", 1024, 768}, 8130 { "1280x720", 1280, 720}, 8131 { "1280x800", 1280, 800}, 8132 {"1280x1024", 1280, 1024}, 8133 { "1440x900", 1440, 900}, 8134 {"1680x1050", 1680, 1050}, 8135 {"1600x1200", 1600, 1200}, 8136 {"1920x1080", 1920, 1080}, 8137 {"1920x1200", 1920, 1200} 8138 }; 8139 8140 n = ARRAY_SIZE(common_modes); 8141 8142 for (i = 0; i < n; i++) { 8143 struct drm_display_mode *curmode = NULL; 8144 bool mode_existed = false; 8145 8146 if (common_modes[i].w > native_mode->hdisplay || 8147 common_modes[i].h > native_mode->vdisplay || 8148 (common_modes[i].w == native_mode->hdisplay && 8149 common_modes[i].h == native_mode->vdisplay)) 8150 continue; 8151 8152 list_for_each_entry(curmode, &connector->probed_modes, head) { 8153 if (common_modes[i].w == curmode->hdisplay && 8154 common_modes[i].h == curmode->vdisplay) { 8155 mode_existed = true; 8156 break; 8157 } 8158 } 8159 8160 if (mode_existed) 8161 continue; 8162 8163 mode = amdgpu_dm_create_common_mode(encoder, 8164 common_modes[i].name, common_modes[i].w, 8165 common_modes[i].h); 8166 if (!mode) 8167 continue; 8168 8169 drm_mode_probed_add(connector, mode); 8170 amdgpu_dm_connector->num_modes++; 8171 } 8172 } 8173 8174 static void 
amdgpu_set_panel_orientation(struct drm_connector *connector) 8175 { 8176 struct drm_encoder *encoder; 8177 struct amdgpu_encoder *amdgpu_encoder; 8178 const struct drm_display_mode *native_mode; 8179 8180 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && 8181 connector->connector_type != DRM_MODE_CONNECTOR_LVDS) 8182 return; 8183 8184 encoder = amdgpu_dm_connector_to_encoder(connector); 8185 if (!encoder) 8186 return; 8187 8188 amdgpu_encoder = to_amdgpu_encoder(encoder); 8189 8190 native_mode = &amdgpu_encoder->native_mode; 8191 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) 8192 return; 8193 8194 drm_connector_set_panel_orientation_with_quirk(connector, 8195 DRM_MODE_PANEL_ORIENTATION_UNKNOWN, 8196 native_mode->hdisplay, 8197 native_mode->vdisplay); 8198 } 8199 8200 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, 8201 struct edid *edid) 8202 { 8203 struct amdgpu_dm_connector *amdgpu_dm_connector = 8204 to_amdgpu_dm_connector(connector); 8205 8206 if (edid) { 8207 /* empty probed_modes */ 8208 INIT_LIST_HEAD(&connector->probed_modes); 8209 amdgpu_dm_connector->num_modes = 8210 drm_add_edid_modes(connector, edid); 8211 8212 /* sorting the probed modes before calling function 8213 * amdgpu_dm_get_native_mode() since EDID can have 8214 * more than one preferred mode. The modes that are 8215 * later in the probed mode list could be of higher 8216 * and preferred resolution. For example, 3840x2160 8217 * resolution in base EDID preferred timing and 4096x2160 8218 * preferred resolution in DID extension block later. 8219 */ 8220 drm_mode_sort(&connector->probed_modes); 8221 amdgpu_dm_get_native_mode(connector); 8222 8223 /* Freesync capabilities are reset by calling 8224 * drm_add_edid_modes() and need to be 8225 * restored here. 8226 */ 8227 amdgpu_dm_update_freesync_caps(connector, edid); 8228 8229 amdgpu_set_panel_orientation(connector); 8230 } else { 8231 amdgpu_dm_connector->num_modes = 0; 8232 } 8233 } 8234 8235 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, 8236 struct drm_display_mode *mode) 8237 { 8238 struct drm_display_mode *m; 8239 8240 list_for_each_entry (m, &aconnector->base.probed_modes, head) { 8241 if (drm_mode_equal(m, mode)) 8242 return true; 8243 } 8244 8245 return false; 8246 } 8247 8248 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) 8249 { 8250 const struct drm_display_mode *m; 8251 struct drm_display_mode *new_mode; 8252 uint i; 8253 uint32_t new_modes_count = 0; 8254 8255 /* Standard FPS values 8256 * 8257 * 23.976 - TV/NTSC 8258 * 24 - Cinema 8259 * 25 - TV/PAL 8260 * 29.97 - TV/NTSC 8261 * 30 - TV/NTSC 8262 * 48 - Cinema HFR 8263 * 50 - TV/PAL 8264 * 60 - Commonly used 8265 * 48,72,96,120 - Multiples of 24 8266 */ 8267 static const uint32_t common_rates[] = { 8268 23976, 24000, 25000, 29970, 30000, 8269 48000, 50000, 60000, 72000, 96000, 120000 8270 }; 8271 8272 /* 8273 * Find mode with highest refresh rate with the same resolution 8274 * as the preferred mode. Some monitors report a preferred mode 8275 * with lower resolution than the highest refresh rate supported. 
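 * The loop below keeps that mode's pixel clock and htotal fixed and only
 * stretches vtotal, so that refresh = clock * 1000 / (htotal * vtotal) lands
 * on each target rate (clock is in kHz and common_rates[] is in mHz, hence
 * the 1000 * 1000 factor in the division).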
8276 */ 8277 8278 m = get_highest_refresh_rate_mode(aconnector, true); 8279 if (!m) 8280 return 0; 8281 8282 for (i = 0; i < ARRAY_SIZE(common_rates); i++) { 8283 uint64_t target_vtotal, target_vtotal_diff; 8284 uint64_t num, den; 8285 8286 if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) 8287 continue; 8288 8289 if (common_rates[i] < aconnector->min_vfreq * 1000 || 8290 common_rates[i] > aconnector->max_vfreq * 1000) 8291 continue; 8292 8293 num = (unsigned long long)m->clock * 1000 * 1000; 8294 den = common_rates[i] * (unsigned long long)m->htotal; 8295 target_vtotal = div_u64(num, den); 8296 target_vtotal_diff = target_vtotal - m->vtotal; 8297 8298 /* Check for illegal modes */ 8299 if (m->vsync_start + target_vtotal_diff < m->vdisplay || 8300 m->vsync_end + target_vtotal_diff < m->vsync_start || 8301 m->vtotal + target_vtotal_diff < m->vsync_end) 8302 continue; 8303 8304 new_mode = drm_mode_duplicate(aconnector->base.dev, m); 8305 if (!new_mode) 8306 goto out; 8307 8308 new_mode->vtotal += (u16)target_vtotal_diff; 8309 new_mode->vsync_start += (u16)target_vtotal_diff; 8310 new_mode->vsync_end += (u16)target_vtotal_diff; 8311 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; 8312 new_mode->type |= DRM_MODE_TYPE_DRIVER; 8313 8314 if (!is_duplicate_mode(aconnector, new_mode)) { 8315 drm_mode_probed_add(&aconnector->base, new_mode); 8316 new_modes_count += 1; 8317 } else 8318 drm_mode_destroy(aconnector->base.dev, new_mode); 8319 } 8320 out: 8321 return new_modes_count; 8322 } 8323 8324 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, 8325 struct edid *edid) 8326 { 8327 struct amdgpu_dm_connector *amdgpu_dm_connector = 8328 to_amdgpu_dm_connector(connector); 8329 8330 if (!edid) 8331 return; 8332 8333 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 8334 amdgpu_dm_connector->num_modes += 8335 add_fs_modes(amdgpu_dm_connector); 8336 } 8337 8338 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) 8339 { 8340 struct amdgpu_dm_connector *amdgpu_dm_connector = 8341 to_amdgpu_dm_connector(connector); 8342 struct drm_encoder *encoder; 8343 struct edid *edid = amdgpu_dm_connector->edid; 8344 8345 encoder = amdgpu_dm_connector_to_encoder(connector); 8346 8347 if (!drm_edid_is_valid(edid)) { 8348 amdgpu_dm_connector->num_modes = 8349 drm_add_modes_noedid(connector, 640, 480); 8350 } else { 8351 amdgpu_dm_connector_ddc_get_modes(connector, edid); 8352 amdgpu_dm_connector_add_common_modes(encoder, connector); 8353 amdgpu_dm_connector_add_freesync_modes(connector, edid); 8354 } 8355 amdgpu_dm_fbc_init(connector); 8356 8357 return amdgpu_dm_connector->num_modes; 8358 } 8359 8360 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, 8361 struct amdgpu_dm_connector *aconnector, 8362 int connector_type, 8363 struct dc_link *link, 8364 int link_index) 8365 { 8366 struct amdgpu_device *adev = drm_to_adev(dm->ddev); 8367 8368 /* 8369 * Some of the properties below require access to state, like bpc. 8370 * Allocate some default initial connector state with our reset helper. 
8371 */ 8372 if (aconnector->base.funcs->reset) 8373 aconnector->base.funcs->reset(&aconnector->base); 8374 8375 aconnector->connector_id = link_index; 8376 aconnector->dc_link = link; 8377 aconnector->base.interlace_allowed = false; 8378 aconnector->base.doublescan_allowed = false; 8379 aconnector->base.stereo_allowed = false; 8380 aconnector->base.dpms = DRM_MODE_DPMS_OFF; 8381 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ 8382 aconnector->audio_inst = -1; 8383 mutex_init(&aconnector->hpd_lock); 8384 8385 /* 8386 * Configure HPD hot plug support. connector->polled defaults to 0, 8387 * which means HPD hot plug is not supported. 8388 */ 8389 switch (connector_type) { 8390 case DRM_MODE_CONNECTOR_HDMIA: 8391 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8392 aconnector->base.ycbcr_420_allowed = 8393 link->link_enc->features.hdmi_ycbcr420_supported ? true : false; 8394 break; 8395 case DRM_MODE_CONNECTOR_DisplayPort: 8396 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8397 link->link_enc = link_enc_cfg_get_link_enc(link); 8398 ASSERT(link->link_enc); 8399 if (link->link_enc) 8400 aconnector->base.ycbcr_420_allowed = 8401 link->link_enc->features.dp_ycbcr420_supported ? true : false; 8402 break; 8403 case DRM_MODE_CONNECTOR_DVID: 8404 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8405 break; 8406 default: 8407 break; 8408 } 8409 8410 drm_object_attach_property(&aconnector->base.base, 8411 dm->ddev->mode_config.scaling_mode_property, 8412 DRM_MODE_SCALE_NONE); 8413 8414 drm_object_attach_property(&aconnector->base.base, 8415 adev->mode_info.underscan_property, 8416 UNDERSCAN_OFF); 8417 drm_object_attach_property(&aconnector->base.base, 8418 adev->mode_info.underscan_hborder_property, 8419 0); 8420 drm_object_attach_property(&aconnector->base.base, 8421 adev->mode_info.underscan_vborder_property, 8422 0); 8423 8424 if (!aconnector->mst_port) 8425 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); 8426 8427 /* This defaults to the max in the range, but we want 8bpc for non-edp. */ 8428 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ?
16 : 8; 8429 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; 8430 8431 if (connector_type == DRM_MODE_CONNECTOR_eDP && 8432 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) { 8433 drm_object_attach_property(&aconnector->base.base, 8434 adev->mode_info.abm_level_property, 0); 8435 } 8436 8437 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 8438 connector_type == DRM_MODE_CONNECTOR_DisplayPort || 8439 connector_type == DRM_MODE_CONNECTOR_eDP) { 8440 drm_connector_attach_hdr_output_metadata_property(&aconnector->base); 8441 8442 if (!aconnector->mst_port) 8443 drm_connector_attach_vrr_capable_property(&aconnector->base); 8444 8445 #ifdef CONFIG_DRM_AMD_DC_HDCP 8446 if (adev->dm.hdcp_workqueue) 8447 drm_connector_attach_content_protection_property(&aconnector->base, true); 8448 #endif 8449 } 8450 } 8451 8452 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 8453 struct i2c_msg *msgs, int num) 8454 { 8455 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 8456 struct ddc_service *ddc_service = i2c->ddc_service; 8457 struct i2c_command cmd; 8458 int i; 8459 int result = -EIO; 8460 8461 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); 8462 8463 if (!cmd.payloads) 8464 return result; 8465 8466 cmd.number_of_payloads = num; 8467 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 8468 cmd.speed = 100; 8469 8470 for (i = 0; i < num; i++) { 8471 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 8472 cmd.payloads[i].address = msgs[i].addr; 8473 cmd.payloads[i].length = msgs[i].len; 8474 cmd.payloads[i].data = msgs[i].buf; 8475 } 8476 8477 if (dc_submit_i2c( 8478 ddc_service->ctx->dc, 8479 ddc_service->ddc_pin->hw_info.ddc_channel, 8480 &cmd)) 8481 result = num; 8482 8483 kfree(cmd.payloads); 8484 return result; 8485 } 8486 8487 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 8488 { 8489 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 8490 } 8491 8492 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 8493 .master_xfer = amdgpu_dm_i2c_xfer, 8494 .functionality = amdgpu_dm_i2c_func, 8495 }; 8496 8497 static struct amdgpu_i2c_adapter * 8498 create_i2c(struct ddc_service *ddc_service, 8499 int link_index, 8500 int *res) 8501 { 8502 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 8503 struct amdgpu_i2c_adapter *i2c; 8504 8505 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); 8506 if (!i2c) 8507 return NULL; 8508 i2c->base.owner = THIS_MODULE; 8509 i2c->base.class = I2C_CLASS_DDC; 8510 i2c->base.dev.parent = &adev->pdev->dev; 8511 i2c->base.algo = &amdgpu_dm_i2c_algo; 8512 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); 8513 i2c_set_adapdata(&i2c->base, i2c); 8514 i2c->ddc_service = ddc_service; 8515 if (i2c->ddc_service->ddc_pin) 8516 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index; 8517 8518 return i2c; 8519 } 8520 8521 8522 /* 8523 * Note: this function assumes that dc_link_detect() was called for the 8524 * dc_link which will be represented by this aconnector. 
8525 */ 8526 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 8527 struct amdgpu_dm_connector *aconnector, 8528 uint32_t link_index, 8529 struct amdgpu_encoder *aencoder) 8530 { 8531 int res = 0; 8532 int connector_type; 8533 struct dc *dc = dm->dc; 8534 struct dc_link *link = dc_get_link_at_index(dc, link_index); 8535 struct amdgpu_i2c_adapter *i2c; 8536 8537 link->priv = aconnector; 8538 8539 DRM_DEBUG_DRIVER("%s()\n", __func__); 8540 8541 i2c = create_i2c(link->ddc, link->link_index, &res); 8542 if (!i2c) { 8543 DRM_ERROR("Failed to create i2c adapter data\n"); 8544 return -ENOMEM; 8545 } 8546 8547 aconnector->i2c = i2c; 8548 res = i2c_add_adapter(&i2c->base); 8549 8550 if (res) { 8551 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); 8552 goto out_free; 8553 } 8554 8555 connector_type = to_drm_connector_type(link->connector_signal); 8556 8557 res = drm_connector_init_with_ddc( 8558 dm->ddev, 8559 &aconnector->base, 8560 &amdgpu_dm_connector_funcs, 8561 connector_type, 8562 &i2c->base); 8563 8564 if (res) { 8565 DRM_ERROR("connector_init failed\n"); 8566 aconnector->connector_id = -1; 8567 goto out_free; 8568 } 8569 8570 drm_connector_helper_add( 8571 &aconnector->base, 8572 &amdgpu_dm_connector_helper_funcs); 8573 8574 amdgpu_dm_connector_init_helper( 8575 dm, 8576 aconnector, 8577 connector_type, 8578 link, 8579 link_index); 8580 8581 drm_connector_attach_encoder( 8582 &aconnector->base, &aencoder->base); 8583 8584 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort 8585 || connector_type == DRM_MODE_CONNECTOR_eDP) 8586 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); 8587 8588 out_free: 8589 if (res) { 8590 kfree(i2c); 8591 aconnector->i2c = NULL; 8592 } 8593 return res; 8594 } 8595 8596 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 8597 { 8598 switch (adev->mode_info.num_crtc) { 8599 case 1: 8600 return 0x1; 8601 case 2: 8602 return 0x3; 8603 case 3: 8604 return 0x7; 8605 case 4: 8606 return 0xf; 8607 case 5: 8608 return 0x1f; 8609 case 6: 8610 default: 8611 return 0x3f; 8612 } 8613 } 8614 8615 static int amdgpu_dm_encoder_init(struct drm_device *dev, 8616 struct amdgpu_encoder *aencoder, 8617 uint32_t link_index) 8618 { 8619 struct amdgpu_device *adev = drm_to_adev(dev); 8620 8621 int res = drm_encoder_init(dev, 8622 &aencoder->base, 8623 &amdgpu_dm_encoder_funcs, 8624 DRM_MODE_ENCODER_TMDS, 8625 NULL); 8626 8627 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 8628 8629 if (!res) 8630 aencoder->encoder_id = link_index; 8631 else 8632 aencoder->encoder_id = -1; 8633 8634 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 8635 8636 return res; 8637 } 8638 8639 static void manage_dm_interrupts(struct amdgpu_device *adev, 8640 struct amdgpu_crtc *acrtc, 8641 bool enable) 8642 { 8643 /* 8644 * We have no guarantee that the frontend index maps to the same 8645 * backend index - some even map to more than one. 8646 * 8647 * TODO: Use a different interrupt or check DC itself for the mapping. 
8648 */ 8649 int irq_type = 8650 amdgpu_display_crtc_idx_to_irq_type( 8651 adev, 8652 acrtc->crtc_id); 8653 8654 if (enable) { 8655 drm_crtc_vblank_on(&acrtc->base); 8656 amdgpu_irq_get( 8657 adev, 8658 &adev->pageflip_irq, 8659 irq_type); 8660 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 8661 amdgpu_irq_get( 8662 adev, 8663 &adev->vline0_irq, 8664 irq_type); 8665 #endif 8666 } else { 8667 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 8668 amdgpu_irq_put( 8669 adev, 8670 &adev->vline0_irq, 8671 irq_type); 8672 #endif 8673 amdgpu_irq_put( 8674 adev, 8675 &adev->pageflip_irq, 8676 irq_type); 8677 drm_crtc_vblank_off(&acrtc->base); 8678 } 8679 } 8680 8681 static void dm_update_pflip_irq_state(struct amdgpu_device *adev, 8682 struct amdgpu_crtc *acrtc) 8683 { 8684 int irq_type = 8685 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); 8686 8687 /** 8688 * This reads the current state for the IRQ and force reapplies 8689 * the setting to hardware. 8690 */ 8691 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type); 8692 } 8693 8694 static bool 8695 is_scaling_state_different(const struct dm_connector_state *dm_state, 8696 const struct dm_connector_state *old_dm_state) 8697 { 8698 if (dm_state->scaling != old_dm_state->scaling) 8699 return true; 8700 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { 8701 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) 8702 return true; 8703 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { 8704 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) 8705 return true; 8706 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || 8707 dm_state->underscan_vborder != old_dm_state->underscan_vborder) 8708 return true; 8709 return false; 8710 } 8711 8712 #ifdef CONFIG_DRM_AMD_DC_HDCP 8713 static bool is_content_protection_different(struct drm_connector_state *state, 8714 const struct drm_connector_state *old_state, 8715 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) 8716 { 8717 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8718 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 8719 8720 /* Handle: Type0/1 change */ 8721 if (old_state->hdcp_content_type != state->hdcp_content_type && 8722 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 8723 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8724 return true; 8725 } 8726 8727 /* CP is being re enabled, ignore this 8728 * 8729 * Handles: ENABLED -> DESIRED 8730 */ 8731 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && 8732 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8733 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; 8734 return false; 8735 } 8736 8737 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED 8738 * 8739 * Handles: UNDESIRED -> ENABLED 8740 */ 8741 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 8742 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 8743 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8744 8745 /* Stream removed and re-enabled 8746 * 8747 * Can sometimes overlap with the HPD case, 8748 * thus set update_hdcp to false to avoid 8749 * setting HDCP multiple times. 
8750 * 8751 * Handles: DESIRED -> DESIRED (Special case) 8752 */ 8753 if (!(old_state->crtc && old_state->crtc->enabled) && 8754 state->crtc && state->crtc->enabled && 8755 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8756 dm_con_state->update_hdcp = false; 8757 return true; 8758 } 8759 8760 /* Hot-plug, headless s3, dpms 8761 * 8762 * Only start HDCP if the display is connected/enabled. 8763 * update_hdcp flag will be set to false until the next 8764 * HPD comes in. 8765 * 8766 * Handles: DESIRED -> DESIRED (Special case) 8767 */ 8768 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 8769 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 8770 dm_con_state->update_hdcp = false; 8771 return true; 8772 } 8773 8774 /* 8775 * Handles: UNDESIRED -> UNDESIRED 8776 * DESIRED -> DESIRED 8777 * ENABLED -> ENABLED 8778 */ 8779 if (old_state->content_protection == state->content_protection) 8780 return false; 8781 8782 /* 8783 * Handles: UNDESIRED -> DESIRED 8784 * DESIRED -> UNDESIRED 8785 * ENABLED -> UNDESIRED 8786 */ 8787 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) 8788 return true; 8789 8790 /* 8791 * Handles: DESIRED -> ENABLED 8792 */ 8793 return false; 8794 } 8795 8796 #endif 8797 static void remove_stream(struct amdgpu_device *adev, 8798 struct amdgpu_crtc *acrtc, 8799 struct dc_stream_state *stream) 8800 { 8801 /* this is the update mode case */ 8802 8803 acrtc->otg_inst = -1; 8804 acrtc->enabled = false; 8805 } 8806 8807 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, 8808 struct dc_cursor_position *position) 8809 { 8810 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 8811 int x, y; 8812 int xorigin = 0, yorigin = 0; 8813 8814 if (!crtc || !plane->state->fb) 8815 return 0; 8816 8817 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || 8818 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { 8819 DRM_ERROR("%s: bad cursor width or height %d x %d\n", 8820 __func__, 8821 plane->state->crtc_w, 8822 plane->state->crtc_h); 8823 return -EINVAL; 8824 } 8825 8826 x = plane->state->crtc_x; 8827 y = plane->state->crtc_y; 8828 8829 if (x <= -amdgpu_crtc->max_cursor_width || 8830 y <= -amdgpu_crtc->max_cursor_height) 8831 return 0; 8832 8833 if (x < 0) { 8834 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 8835 x = 0; 8836 } 8837 if (y < 0) { 8838 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 8839 y = 0; 8840 } 8841 position->enable = true; 8842 position->translate_by_source = true; 8843 position->x = x; 8844 position->y = y; 8845 position->x_hotspot = xorigin; 8846 position->y_hotspot = yorigin; 8847 8848 return 0; 8849 } 8850 8851 static void handle_cursor_update(struct drm_plane *plane, 8852 struct drm_plane_state *old_plane_state) 8853 { 8854 struct amdgpu_device *adev = drm_to_adev(plane->dev); 8855 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 8856 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 8857 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 8858 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 8859 uint64_t address = afb ? 
afb->address : 0; 8860 struct dc_cursor_position position = {0}; 8861 struct dc_cursor_attributes attributes; 8862 int ret; 8863 8864 if (!plane->state->fb && !old_plane_state->fb) 8865 return; 8866 8867 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n", 8868 __func__, 8869 amdgpu_crtc->crtc_id, 8870 plane->state->crtc_w, 8871 plane->state->crtc_h); 8872 8873 ret = get_cursor_position(plane, crtc, &position); 8874 if (ret) 8875 return; 8876 8877 if (!position.enable) { 8878 /* turn off cursor */ 8879 if (crtc_state && crtc_state->stream) { 8880 mutex_lock(&adev->dm.dc_lock); 8881 dc_stream_set_cursor_position(crtc_state->stream, 8882 &position); 8883 mutex_unlock(&adev->dm.dc_lock); 8884 } 8885 return; 8886 } 8887 8888 amdgpu_crtc->cursor_width = plane->state->crtc_w; 8889 amdgpu_crtc->cursor_height = plane->state->crtc_h; 8890 8891 memset(&attributes, 0, sizeof(attributes)); 8892 attributes.address.high_part = upper_32_bits(address); 8893 attributes.address.low_part = lower_32_bits(address); 8894 attributes.width = plane->state->crtc_w; 8895 attributes.height = plane->state->crtc_h; 8896 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 8897 attributes.rotation_angle = 0; 8898 attributes.attribute_flags.value = 0; 8899 8900 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; 8901 8902 if (crtc_state->stream) { 8903 mutex_lock(&adev->dm.dc_lock); 8904 if (!dc_stream_set_cursor_attributes(crtc_state->stream, 8905 &attributes)) 8906 DRM_ERROR("DC failed to set cursor attributes\n"); 8907 8908 if (!dc_stream_set_cursor_position(crtc_state->stream, 8909 &position)) 8910 DRM_ERROR("DC failed to set cursor position\n"); 8911 mutex_unlock(&adev->dm.dc_lock); 8912 } 8913 } 8914 8915 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 8916 { 8917 8918 assert_spin_locked(&acrtc->base.dev->event_lock); 8919 WARN_ON(acrtc->event); 8920 8921 acrtc->event = acrtc->base.state->event; 8922 8923 /* Set the flip status */ 8924 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 8925 8926 /* Mark this event as consumed */ 8927 acrtc->base.state->event = NULL; 8928 8929 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 8930 acrtc->crtc_id); 8931 } 8932 8933 static void update_freesync_state_on_stream( 8934 struct amdgpu_display_manager *dm, 8935 struct dm_crtc_state *new_crtc_state, 8936 struct dc_stream_state *new_stream, 8937 struct dc_plane_state *surface, 8938 u32 flip_timestamp_in_us) 8939 { 8940 struct mod_vrr_params vrr_params; 8941 struct dc_info_packet vrr_infopacket = {0}; 8942 struct amdgpu_device *adev = dm->adev; 8943 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 8944 unsigned long flags; 8945 bool pack_sdp_v1_3 = false; 8946 8947 if (!new_stream) 8948 return; 8949 8950 /* 8951 * TODO: Determine why min/max totals and vrefresh can be 0 here. 8952 * For now it's sufficient to just guard against these conditions. 8953 */ 8954 8955 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 8956 return; 8957 8958 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8959 vrr_params = acrtc->dm_irq_params.vrr_params; 8960 8961 if (surface) { 8962 mod_freesync_handle_preflip( 8963 dm->freesync_module, 8964 surface, 8965 new_stream, 8966 flip_timestamp_in_us, 8967 &vrr_params); 8968 8969 if (adev->family < AMDGPU_FAMILY_AI && 8970 amdgpu_dm_vrr_active(new_crtc_state)) { 8971 mod_freesync_handle_v_update(dm->freesync_module, 8972 new_stream, &vrr_params); 8973 8974 /* Need to call this before the frame ends. 
*/ 8975 dc_stream_adjust_vmin_vmax(dm->dc, 8976 new_crtc_state->stream, 8977 &vrr_params.adjust); 8978 } 8979 } 8980 8981 mod_freesync_build_vrr_infopacket( 8982 dm->freesync_module, 8983 new_stream, 8984 &vrr_params, 8985 PACKET_TYPE_VRR, 8986 TRANSFER_FUNC_UNKNOWN, 8987 &vrr_infopacket, 8988 pack_sdp_v1_3); 8989 8990 new_crtc_state->freesync_timing_changed |= 8991 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, 8992 &vrr_params.adjust, 8993 sizeof(vrr_params.adjust)) != 0); 8994 8995 new_crtc_state->freesync_vrr_info_changed |= 8996 (memcmp(&new_crtc_state->vrr_infopacket, 8997 &vrr_infopacket, 8998 sizeof(vrr_infopacket)) != 0); 8999 9000 acrtc->dm_irq_params.vrr_params = vrr_params; 9001 new_crtc_state->vrr_infopacket = vrr_infopacket; 9002 9003 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust; 9004 new_stream->vrr_infopacket = vrr_infopacket; 9005 9006 if (new_crtc_state->freesync_vrr_info_changed) 9007 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", 9008 new_crtc_state->base.crtc->base.id, 9009 (int)new_crtc_state->base.vrr_enabled, 9010 (int)vrr_params.state); 9011 9012 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9013 } 9014 9015 static void update_stream_irq_parameters( 9016 struct amdgpu_display_manager *dm, 9017 struct dm_crtc_state *new_crtc_state) 9018 { 9019 struct dc_stream_state *new_stream = new_crtc_state->stream; 9020 struct mod_vrr_params vrr_params; 9021 struct mod_freesync_config config = new_crtc_state->freesync_config; 9022 struct amdgpu_device *adev = dm->adev; 9023 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 9024 unsigned long flags; 9025 9026 if (!new_stream) 9027 return; 9028 9029 /* 9030 * TODO: Determine why min/max totals and vrefresh can be 0 here. 9031 * For now it's sufficient to just guard against these conditions. 9032 */ 9033 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 9034 return; 9035 9036 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 9037 vrr_params = acrtc->dm_irq_params.vrr_params; 9038 9039 if (new_crtc_state->vrr_supported && 9040 config.min_refresh_in_uhz && 9041 config.max_refresh_in_uhz) { 9042 /* 9043 * if freesync compatible mode was set, config.state will be set 9044 * in atomic check 9045 */ 9046 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && 9047 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || 9048 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { 9049 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; 9050 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; 9051 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; 9052 vrr_params.state = VRR_STATE_ACTIVE_FIXED; 9053 } else { 9054 config.state = new_crtc_state->base.vrr_enabled ? 
9055 VRR_STATE_ACTIVE_VARIABLE : 9056 VRR_STATE_INACTIVE; 9057 } 9058 } else { 9059 config.state = VRR_STATE_UNSUPPORTED; 9060 } 9061 9062 mod_freesync_build_vrr_params(dm->freesync_module, 9063 new_stream, 9064 &config, &vrr_params); 9065 9066 new_crtc_state->freesync_timing_changed |= 9067 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, 9068 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0); 9069 9070 new_crtc_state->freesync_config = config; 9071 /* Copy state for access from DM IRQ handler */ 9072 acrtc->dm_irq_params.freesync_config = config; 9073 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes; 9074 acrtc->dm_irq_params.vrr_params = vrr_params; 9075 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9076 } 9077 9078 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, 9079 struct dm_crtc_state *new_state) 9080 { 9081 bool old_vrr_active = amdgpu_dm_vrr_active(old_state); 9082 bool new_vrr_active = amdgpu_dm_vrr_active(new_state); 9083 9084 if (!old_vrr_active && new_vrr_active) { 9085 /* Transition VRR inactive -> active: 9086 * While VRR is active, we must not disable vblank irq, as a 9087 * reenable after disable would compute bogus vblank/pflip 9088 * timestamps if it likely happened inside display front-porch. 9089 * 9090 * We also need vupdate irq for the actual core vblank handling 9091 * at end of vblank. 9092 */ 9093 dm_set_vupdate_irq(new_state->base.crtc, true); 9094 drm_crtc_vblank_get(new_state->base.crtc); 9095 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", 9096 __func__, new_state->base.crtc->base.id); 9097 } else if (old_vrr_active && !new_vrr_active) { 9098 /* Transition VRR active -> inactive: 9099 * Allow vblank irq disable again for fixed refresh rate. 9100 */ 9101 dm_set_vupdate_irq(new_state->base.crtc, false); 9102 drm_crtc_vblank_put(new_state->base.crtc); 9103 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", 9104 __func__, new_state->base.crtc->base.id); 9105 } 9106 } 9107 9108 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) 9109 { 9110 struct drm_plane *plane; 9111 struct drm_plane_state *old_plane_state; 9112 int i; 9113 9114 /* 9115 * TODO: Make this per-stream so we don't issue redundant updates for 9116 * commits with multiple streams. 
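 * As written, the loop below walks every cursor plane in the atomic state
 * and re-programs it via handle_cursor_update(), even when only one of the
 * streams in the commit actually changed.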
9117 */ 9118 for_each_old_plane_in_state(state, plane, old_plane_state, i) 9119 if (plane->type == DRM_PLANE_TYPE_CURSOR) 9120 handle_cursor_update(plane, old_plane_state); 9121 } 9122 9123 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 9124 struct dc_state *dc_state, 9125 struct drm_device *dev, 9126 struct amdgpu_display_manager *dm, 9127 struct drm_crtc *pcrtc, 9128 bool wait_for_vblank) 9129 { 9130 uint32_t i; 9131 uint64_t timestamp_ns; 9132 struct drm_plane *plane; 9133 struct drm_plane_state *old_plane_state, *new_plane_state; 9134 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 9135 struct drm_crtc_state *new_pcrtc_state = 9136 drm_atomic_get_new_crtc_state(state, pcrtc); 9137 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 9138 struct dm_crtc_state *dm_old_crtc_state = 9139 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 9140 int planes_count = 0, vpos, hpos; 9141 unsigned long flags; 9142 struct amdgpu_bo *abo; 9143 uint32_t target_vblank, last_flip_vblank; 9144 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); 9145 bool pflip_present = false; 9146 struct { 9147 struct dc_surface_update surface_updates[MAX_SURFACES]; 9148 struct dc_plane_info plane_infos[MAX_SURFACES]; 9149 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 9150 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 9151 struct dc_stream_update stream_update; 9152 } *bundle; 9153 9154 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 9155 9156 if (!bundle) { 9157 dm_error("Failed to allocate update bundle\n"); 9158 goto cleanup; 9159 } 9160 9161 /* 9162 * Disable the cursor first if we're disabling all the planes. 9163 * It'll remain on the screen after the planes are re-enabled 9164 * if we don't. 9165 */ 9166 if (acrtc_state->active_planes == 0) 9167 amdgpu_dm_commit_cursors(state); 9168 9169 /* update planes when needed */ 9170 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 9171 struct drm_crtc *crtc = new_plane_state->crtc; 9172 struct drm_crtc_state *new_crtc_state; 9173 struct drm_framebuffer *fb = new_plane_state->fb; 9174 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb; 9175 bool plane_needs_flip; 9176 struct dc_plane_state *dc_plane; 9177 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 9178 9179 /* Cursor plane is handled after stream updates */ 9180 if (plane->type == DRM_PLANE_TYPE_CURSOR) 9181 continue; 9182 9183 if (!fb || !crtc || pcrtc != crtc) 9184 continue; 9185 9186 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 9187 if (!new_crtc_state->active) 9188 continue; 9189 9190 dc_plane = dm_new_plane_state->dc_state; 9191 9192 bundle->surface_updates[planes_count].surface = dc_plane; 9193 if (new_pcrtc_state->color_mgmt_changed) { 9194 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; 9195 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; 9196 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; 9197 } 9198 9199 fill_dc_scaling_info(dm->adev, new_plane_state, 9200 &bundle->scaling_infos[planes_count]); 9201 9202 bundle->surface_updates[planes_count].scaling_info = 9203 &bundle->scaling_infos[planes_count]; 9204 9205 plane_needs_flip = old_plane_state->fb && new_plane_state->fb; 9206 9207 pflip_present = pflip_present || plane_needs_flip; 9208 9209 if (!plane_needs_flip) { 9210 planes_count += 1; 9211 continue; 9212 } 9213 9214 abo = 
gem_to_amdgpu_bo(fb->obj[0]); 9215 fill_dc_plane_info_and_addr( 9216 dm->adev, new_plane_state, 9217 afb->tiling_flags, 9218 &bundle->plane_infos[planes_count], 9219 &bundle->flip_addrs[planes_count].address, 9220 afb->tmz_surface, false); 9221 9222 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n", 9223 new_plane_state->plane->index, 9224 bundle->plane_infos[planes_count].dcc.enable); 9225 9226 bundle->surface_updates[planes_count].plane_info = 9227 &bundle->plane_infos[planes_count]; 9228 9229 /* 9230 * Only allow immediate flips for fast updates that don't 9231 * change FB pitch, DCC state, rotation or mirroring. 9232 */ 9233 bundle->flip_addrs[planes_count].flip_immediate = 9234 crtc->state->async_flip && 9235 acrtc_state->update_type == UPDATE_TYPE_FAST; 9236 9237 timestamp_ns = ktime_get_ns(); 9238 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); 9239 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count]; 9240 bundle->surface_updates[planes_count].surface = dc_plane; 9241 9242 if (!bundle->surface_updates[planes_count].surface) { 9243 DRM_ERROR("No surface for CRTC: id=%d\n", 9244 acrtc_attach->crtc_id); 9245 continue; 9246 } 9247 9248 if (plane == pcrtc->primary) 9249 update_freesync_state_on_stream( 9250 dm, 9251 acrtc_state, 9252 acrtc_state->stream, 9253 dc_plane, 9254 bundle->flip_addrs[planes_count].flip_timestamp_in_us); 9255 9256 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n", 9257 __func__, 9258 bundle->flip_addrs[planes_count].address.grph.addr.high_part, 9259 bundle->flip_addrs[planes_count].address.grph.addr.low_part); 9260 9261 planes_count += 1; 9262 9263 } 9264 9265 if (pflip_present) { 9266 if (!vrr_active) { 9267 /* Use old throttling in non-vrr fixed refresh rate mode 9268 * to keep flip scheduling based on target vblank counts 9269 * working in a backwards compatible way, e.g., for 9270 * clients using the GLX_OML_sync_control extension or 9271 * DRI3/Present extension with defined target_msc. 9272 */ 9273 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); 9274 } 9275 else { 9276 /* For variable refresh rate mode only: 9277 * Get vblank of last completed flip to avoid > 1 vrr 9278 * flips per video frame by use of throttling, but allow 9279 * flip programming anywhere in the possibly large 9280 * variable vrr vblank interval for fine-grained flip 9281 * timing control and more opportunity to avoid stutter 9282 * on late submission of flips. 9283 */ 9284 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9285 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; 9286 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9287 } 9288 9289 target_vblank = last_flip_vblank + wait_for_vblank; 9290 9291 /* 9292 * Wait until we're out of the vertical blank period before the one 9293 * targeted by the flip 9294 */ 9295 while ((acrtc_attach->enabled && 9296 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 9297 0, &vpos, &hpos, NULL, 9298 NULL, &pcrtc->hwmode) 9299 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 9300 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 9301 (int)(target_vblank - 9302 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { 9303 usleep_range(1000, 1100); 9304 } 9305 9306 /** 9307 * Prepare the flip event for the pageflip interrupt to handle. 9308 * 9309 * This only works in the case where we've already turned on the 9310 * appropriate hardware blocks (e.g.
HUBP) so in the transition case 9311 * from 0 -> n planes we have to skip a hardware generated event 9312 * and rely on sending it from software. 9313 */ 9314 if (acrtc_attach->base.state->event && 9315 acrtc_state->active_planes > 0 && 9316 !acrtc_state->force_dpms_off) { 9317 drm_crtc_vblank_get(pcrtc); 9318 9319 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9320 9321 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); 9322 prepare_flip_isr(acrtc_attach); 9323 9324 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9325 } 9326 9327 if (acrtc_state->stream) { 9328 if (acrtc_state->freesync_vrr_info_changed) 9329 bundle->stream_update.vrr_infopacket = 9330 &acrtc_state->stream->vrr_infopacket; 9331 } 9332 } 9333 9334 /* Update the planes if changed or disable if we don't have any. */ 9335 if ((planes_count || acrtc_state->active_planes == 0) && 9336 acrtc_state->stream) { 9337 /* 9338 * If PSR or idle optimizations are enabled then flush out 9339 * any pending work before hardware programming. 9340 */ 9341 if (dm->vblank_control_workqueue) 9342 flush_workqueue(dm->vblank_control_workqueue); 9343 9344 bundle->stream_update.stream = acrtc_state->stream; 9345 if (new_pcrtc_state->mode_changed) { 9346 bundle->stream_update.src = acrtc_state->stream->src; 9347 bundle->stream_update.dst = acrtc_state->stream->dst; 9348 } 9349 9350 if (new_pcrtc_state->color_mgmt_changed) { 9351 /* 9352 * TODO: This isn't fully correct since we've actually 9353 * already modified the stream in place. 9354 */ 9355 bundle->stream_update.gamut_remap = 9356 &acrtc_state->stream->gamut_remap_matrix; 9357 bundle->stream_update.output_csc_transform = 9358 &acrtc_state->stream->csc_color_matrix; 9359 bundle->stream_update.out_transfer_func = 9360 acrtc_state->stream->out_transfer_func; 9361 } 9362 9363 acrtc_state->stream->abm_level = acrtc_state->abm_level; 9364 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) 9365 bundle->stream_update.abm_level = &acrtc_state->abm_level; 9366 9367 /* 9368 * If FreeSync state on the stream has changed then we need to 9369 * re-adjust the min/max bounds now that DC doesn't handle this 9370 * as part of commit. 9371 */ 9372 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { 9373 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9374 dc_stream_adjust_vmin_vmax( 9375 dm->dc, acrtc_state->stream, 9376 &acrtc_attach->dm_irq_params.vrr_params.adjust); 9377 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9378 } 9379 mutex_lock(&dm->dc_lock); 9380 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 9381 acrtc_state->stream->link->psr_settings.psr_allow_active) 9382 amdgpu_dm_psr_disable(acrtc_state->stream); 9383 9384 dc_commit_updates_for_stream(dm->dc, 9385 bundle->surface_updates, 9386 planes_count, 9387 acrtc_state->stream, 9388 &bundle->stream_update, 9389 dc_state); 9390 9391 /** 9392 * Enable or disable the interrupts on the backend. 9393 * 9394 * Most pipes are put into power gating when unused. 9395 * 9396 * When power gating is enabled on a pipe we lose the 9397 * interrupt enablement state when power gating is disabled. 9398 * 9399 * So we need to update the IRQ control state in hardware 9400 * whenever the pipe turns on (since it could be previously 9401 * power gated) or off (since some pipes can't be power gated 9402 * on some ASICs). 
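 * dm_update_pflip_irq_state() below re-applies the pageflip interrupt
 * state whenever the number of active planes on this CRTC changes.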
9403 */ 9404 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) 9405 dm_update_pflip_irq_state(drm_to_adev(dev), 9406 acrtc_attach); 9407 9408 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 9409 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && 9410 !acrtc_state->stream->link->psr_settings.psr_feature_enabled) 9411 amdgpu_dm_link_setup_psr(acrtc_state->stream); 9412 9413 /* Decrement skip count when PSR is enabled and we're doing fast updates. */ 9414 if (acrtc_state->update_type == UPDATE_TYPE_FAST && 9415 acrtc_state->stream->link->psr_settings.psr_feature_enabled) { 9416 struct amdgpu_dm_connector *aconn = 9417 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; 9418 9419 if (aconn->psr_skip_count > 0) 9420 aconn->psr_skip_count--; 9421 9422 /* Allow PSR when skip count is 0. */ 9423 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; 9424 } else { 9425 acrtc_attach->dm_irq_params.allow_psr_entry = false; 9426 } 9427 9428 mutex_unlock(&dm->dc_lock); 9429 } 9430 9431 /* 9432 * Update cursor state *after* programming all the planes. 9433 * This avoids redundant programming in the case where we're going 9434 * to be disabling a single plane - those pipes are being disabled. 9435 */ 9436 if (acrtc_state->active_planes) 9437 amdgpu_dm_commit_cursors(state); 9438 9439 cleanup: 9440 kfree(bundle); 9441 } 9442 9443 static void amdgpu_dm_commit_audio(struct drm_device *dev, 9444 struct drm_atomic_state *state) 9445 { 9446 struct amdgpu_device *adev = drm_to_adev(dev); 9447 struct amdgpu_dm_connector *aconnector; 9448 struct drm_connector *connector; 9449 struct drm_connector_state *old_con_state, *new_con_state; 9450 struct drm_crtc_state *new_crtc_state; 9451 struct dm_crtc_state *new_dm_crtc_state; 9452 const struct dc_stream_status *status; 9453 int i, inst; 9454 9455 /* Notify device removals. */ 9456 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9457 if (old_con_state->crtc != new_con_state->crtc) { 9458 /* CRTC changes require notification. */ 9459 goto notify; 9460 } 9461 9462 if (!new_con_state->crtc) 9463 continue; 9464 9465 new_crtc_state = drm_atomic_get_new_crtc_state( 9466 state, new_con_state->crtc); 9467 9468 if (!new_crtc_state) 9469 continue; 9470 9471 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9472 continue; 9473 9474 notify: 9475 aconnector = to_amdgpu_dm_connector(connector); 9476 9477 mutex_lock(&adev->dm.audio_lock); 9478 inst = aconnector->audio_inst; 9479 aconnector->audio_inst = -1; 9480 mutex_unlock(&adev->dm.audio_lock); 9481 9482 amdgpu_dm_audio_eld_notify(adev, inst); 9483 } 9484 9485 /* Notify audio device additions. 
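 * For each connector whose CRTC went through a modeset and still carries
 * a valid stream, the audio instance from the stream status is forwarded
 * via amdgpu_dm_audio_eld_notify().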
*/ 9486 for_each_new_connector_in_state(state, connector, new_con_state, i) { 9487 if (!new_con_state->crtc) 9488 continue; 9489 9490 new_crtc_state = drm_atomic_get_new_crtc_state( 9491 state, new_con_state->crtc); 9492 9493 if (!new_crtc_state) 9494 continue; 9495 9496 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9497 continue; 9498 9499 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 9500 if (!new_dm_crtc_state->stream) 9501 continue; 9502 9503 status = dc_stream_get_status(new_dm_crtc_state->stream); 9504 if (!status) 9505 continue; 9506 9507 aconnector = to_amdgpu_dm_connector(connector); 9508 9509 mutex_lock(&adev->dm.audio_lock); 9510 inst = status->audio_inst; 9511 aconnector->audio_inst = inst; 9512 mutex_unlock(&adev->dm.audio_lock); 9513 9514 amdgpu_dm_audio_eld_notify(adev, inst); 9515 } 9516 } 9517 9518 /* 9519 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC 9520 * @crtc_state: the DRM CRTC state 9521 * @stream_state: the DC stream state. 9522 * 9523 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring 9524 * a dc_stream_state's flags in sync with a drm_crtc_state's flags. 9525 */ 9526 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, 9527 struct dc_stream_state *stream_state) 9528 { 9529 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); 9530 } 9531 9532 /** 9533 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. 9534 * @state: The atomic state to commit 9535 * 9536 * This will tell DC to commit the constructed DC state from atomic_check, 9537 * programming the hardware. Any failures here implies a hardware failure, since 9538 * atomic check should have filtered anything non-kosher. 9539 */ 9540 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) 9541 { 9542 struct drm_device *dev = state->dev; 9543 struct amdgpu_device *adev = drm_to_adev(dev); 9544 struct amdgpu_display_manager *dm = &adev->dm; 9545 struct dm_atomic_state *dm_state; 9546 struct dc_state *dc_state = NULL, *dc_state_temp = NULL; 9547 uint32_t i, j; 9548 struct drm_crtc *crtc; 9549 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 9550 unsigned long flags; 9551 bool wait_for_vblank = true; 9552 struct drm_connector *connector; 9553 struct drm_connector_state *old_con_state, *new_con_state; 9554 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 9555 int crtc_disable_count = 0; 9556 bool mode_set_reset_required = false; 9557 int r; 9558 9559 trace_amdgpu_dm_atomic_commit_tail_begin(state); 9560 9561 r = drm_atomic_helper_wait_for_fences(dev, state, false); 9562 if (unlikely(r)) 9563 DRM_ERROR("Waiting for fences timed out!"); 9564 9565 drm_atomic_helper_update_legacy_modeset_state(dev, state); 9566 9567 dm_state = dm_atomic_get_new_state(state); 9568 if (dm_state && dm_state->context) { 9569 dc_state = dm_state->context; 9570 } else { 9571 /* No state changes, retain current state. 
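 * Build a transient copy of the current DC state so the commit code below
 * always has a dc_state to work with; the copy is released at the end of
 * this function.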
*/ 9572 dc_state_temp = dc_create_state(dm->dc); 9573 ASSERT(dc_state_temp); 9574 dc_state = dc_state_temp; 9575 dc_resource_state_copy_construct_current(dm->dc, dc_state); 9576 } 9577 9578 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state, 9579 new_crtc_state, i) { 9580 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9581 9582 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9583 9584 if (old_crtc_state->active && 9585 (!new_crtc_state->active || 9586 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 9587 manage_dm_interrupts(adev, acrtc, false); 9588 dc_stream_release(dm_old_crtc_state->stream); 9589 } 9590 } 9591 9592 drm_atomic_helper_calc_timestamping_constants(state); 9593 9594 /* update changed items */ 9595 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 9596 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9597 9598 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9599 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9600 9601 drm_dbg_state(state->dev, 9602 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " 9603 "planes_changed:%d, mode_changed:%d,active_changed:%d," 9604 "connectors_changed:%d\n", 9605 acrtc->crtc_id, 9606 new_crtc_state->enable, 9607 new_crtc_state->active, 9608 new_crtc_state->planes_changed, 9609 new_crtc_state->mode_changed, 9610 new_crtc_state->active_changed, 9611 new_crtc_state->connectors_changed); 9612 9613 /* Disable cursor if disabling crtc */ 9614 if (old_crtc_state->active && !new_crtc_state->active) { 9615 struct dc_cursor_position position; 9616 9617 memset(&position, 0, sizeof(position)); 9618 mutex_lock(&dm->dc_lock); 9619 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position); 9620 mutex_unlock(&dm->dc_lock); 9621 } 9622 9623 /* Copy all transient state flags into dc state */ 9624 if (dm_new_crtc_state->stream) { 9625 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base, 9626 dm_new_crtc_state->stream); 9627 } 9628 9629 /* handles headless hotplug case, updating new_state and 9630 * aconnector as needed 9631 */ 9632 9633 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { 9634 9635 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); 9636 9637 if (!dm_new_crtc_state->stream) { 9638 /* 9639 * this could happen because of issues with 9640 * userspace notifications delivery. 9641 * In this case userspace tries to set mode on 9642 * display which is disconnected in fact. 9643 * dc_sink is NULL in this case on aconnector. 9644 * We expect reset mode will come soon. 9645 * 9646 * This can also happen when unplug is done 9647 * during resume sequence ended 9648 * 9649 * In this case, we want to pretend we still 9650 * have a sink to keep the pipe running so that 9651 * hw state is consistent with the sw state 9652 */ 9653 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 9654 __func__, acrtc->base.base.id); 9655 continue; 9656 } 9657 9658 if (dm_old_crtc_state->stream) 9659 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 9660 9661 pm_runtime_get_noresume(dev->dev); 9662 9663 acrtc->enabled = true; 9664 acrtc->hw_mode = new_crtc_state->mode; 9665 crtc->hwmode = new_crtc_state->mode; 9666 mode_set_reset_required = true; 9667 } else if (modereset_required(new_crtc_state)) { 9668 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); 9669 /* i.e. 
reset mode */ 9670 if (dm_old_crtc_state->stream) 9671 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 9672 9673 mode_set_reset_required = true; 9674 } 9675 } /* for_each_crtc_in_state() */ 9676 9677 if (dc_state) { 9678 /* if there mode set or reset, disable eDP PSR */ 9679 if (mode_set_reset_required) { 9680 if (dm->vblank_control_workqueue) 9681 flush_workqueue(dm->vblank_control_workqueue); 9682 9683 amdgpu_dm_psr_disable_all(dm); 9684 } 9685 9686 dm_enable_per_frame_crtc_master_sync(dc_state); 9687 mutex_lock(&dm->dc_lock); 9688 WARN_ON(!dc_commit_state(dm->dc, dc_state)); 9689 9690 /* Allow idle optimization when vblank count is 0 for display off */ 9691 if (dm->active_vblank_irq_count == 0) 9692 dc_allow_idle_optimizations(dm->dc, true); 9693 mutex_unlock(&dm->dc_lock); 9694 } 9695 9696 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 9697 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9698 9699 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9700 9701 if (dm_new_crtc_state->stream != NULL) { 9702 const struct dc_stream_status *status = 9703 dc_stream_get_status(dm_new_crtc_state->stream); 9704 9705 if (!status) 9706 status = dc_stream_get_status_from_state(dc_state, 9707 dm_new_crtc_state->stream); 9708 if (!status) 9709 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); 9710 else 9711 acrtc->otg_inst = status->primary_otg_inst; 9712 } 9713 } 9714 #ifdef CONFIG_DRM_AMD_DC_HDCP 9715 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9716 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9717 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9718 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 9719 9720 new_crtc_state = NULL; 9721 9722 if (acrtc) 9723 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 9724 9725 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9726 9727 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && 9728 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 9729 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 9730 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 9731 dm_new_con_state->update_hdcp = true; 9732 continue; 9733 } 9734 9735 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) 9736 hdcp_update_display( 9737 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, 9738 new_con_state->hdcp_content_type, 9739 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED); 9740 } 9741 #endif 9742 9743 /* Handle connector state changes */ 9744 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9745 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9746 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 9747 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9748 struct dc_surface_update dummy_updates[MAX_SURFACES]; 9749 struct dc_stream_update stream_update; 9750 struct dc_info_packet hdr_packet; 9751 struct dc_stream_status *status = NULL; 9752 bool abm_changed, hdr_changed, scaling_changed; 9753 9754 memset(&dummy_updates, 0, sizeof(dummy_updates)); 9755 memset(&stream_update, 0, sizeof(stream_update)); 9756 9757 if (acrtc) { 9758 new_crtc_state = 
drm_atomic_get_new_crtc_state(state, &acrtc->base); 9759 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9760 } 9761 9762 /* Skip any modesets/resets */ 9763 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) 9764 continue; 9765 9766 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9767 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9768 9769 scaling_changed = is_scaling_state_different(dm_new_con_state, 9770 dm_old_con_state); 9771 9772 abm_changed = dm_new_crtc_state->abm_level != 9773 dm_old_crtc_state->abm_level; 9774 9775 hdr_changed = 9776 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state); 9777 9778 if (!scaling_changed && !abm_changed && !hdr_changed) 9779 continue; 9780 9781 stream_update.stream = dm_new_crtc_state->stream; 9782 if (scaling_changed) { 9783 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, 9784 dm_new_con_state, dm_new_crtc_state->stream); 9785 9786 stream_update.src = dm_new_crtc_state->stream->src; 9787 stream_update.dst = dm_new_crtc_state->stream->dst; 9788 } 9789 9790 if (abm_changed) { 9791 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; 9792 9793 stream_update.abm_level = &dm_new_crtc_state->abm_level; 9794 } 9795 9796 if (hdr_changed) { 9797 fill_hdr_info_packet(new_con_state, &hdr_packet); 9798 stream_update.hdr_static_metadata = &hdr_packet; 9799 } 9800 9801 status = dc_stream_get_status(dm_new_crtc_state->stream); 9802 9803 if (WARN_ON(!status)) 9804 continue; 9805 9806 WARN_ON(!status->plane_count); 9807 9808 /* 9809 * TODO: DC refuses to perform stream updates without a dc_surface_update. 9810 * Here we create an empty update on each plane. 9811 * To fix this, DC should permit updating only stream properties. 9812 */ 9813 for (j = 0; j < status->plane_count; j++) 9814 dummy_updates[j].surface = status->plane_states[0]; 9815 9816 9817 mutex_lock(&dm->dc_lock); 9818 dc_commit_updates_for_stream(dm->dc, 9819 dummy_updates, 9820 status->plane_count, 9821 dm_new_crtc_state->stream, 9822 &stream_update, 9823 dc_state); 9824 mutex_unlock(&dm->dc_lock); 9825 } 9826 9827 /* Count number of newly disabled CRTCs for dropping PM refs later. */ 9828 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 9829 new_crtc_state, i) { 9830 if (old_crtc_state->active && !new_crtc_state->active) 9831 crtc_disable_count++; 9832 9833 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9834 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9835 9836 /* For freesync config update on crtc state and params for irq */ 9837 update_stream_irq_parameters(dm, dm_new_crtc_state); 9838 9839 /* Handle vrr on->off / off->on transitions */ 9840 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, 9841 dm_new_crtc_state); 9842 } 9843 9844 /** 9845 * Enable interrupts for CRTCs that are newly enabled or went through 9846 * a modeset. It was intentionally deferred until after the front end 9847 * state was modified to wait until the OTG was on and so the IRQ 9848 * handlers didn't access stale or invalid state. 
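 * Consequently, manage_dm_interrupts(..., true) below is only reached for
 * CRTCs that are newly active or required a modeset.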
9849 */ 9850 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 9851 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9852 #ifdef CONFIG_DEBUG_FS 9853 bool configure_crc = false; 9854 enum amdgpu_dm_pipe_crc_source cur_crc_src; 9855 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 9856 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk; 9857 #endif 9858 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 9859 cur_crc_src = acrtc->dm_irq_params.crc_src; 9860 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9861 #endif 9862 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9863 9864 if (new_crtc_state->active && 9865 (!old_crtc_state->active || 9866 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 9867 dc_stream_retain(dm_new_crtc_state->stream); 9868 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; 9869 manage_dm_interrupts(adev, acrtc, true); 9870 9871 #ifdef CONFIG_DEBUG_FS 9872 /** 9873 * Frontend may have changed so reapply the CRC capture 9874 * settings for the stream. 9875 */ 9876 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9877 9878 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { 9879 configure_crc = true; 9880 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 9881 if (amdgpu_dm_crc_window_is_activated(crtc)) { 9882 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 9883 acrtc->dm_irq_params.crc_window.update_win = true; 9884 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2; 9885 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); 9886 crc_rd_wrk->crtc = crtc; 9887 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); 9888 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9889 } 9890 #endif 9891 } 9892 9893 if (configure_crc) 9894 if (amdgpu_dm_crtc_configure_crc_source( 9895 crtc, dm_new_crtc_state, cur_crc_src)) 9896 DRM_DEBUG_DRIVER("Failed to configure crc source"); 9897 #endif 9898 } 9899 } 9900 9901 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) 9902 if (new_crtc_state->async_flip) 9903 wait_for_vblank = false; 9904 9905 /* update planes when needed per crtc*/ 9906 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { 9907 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9908 9909 if (dm_new_crtc_state->stream) 9910 amdgpu_dm_commit_planes(state, dc_state, dev, 9911 dm, crtc, wait_for_vblank); 9912 } 9913 9914 /* Update audio instances for each connector. 
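 * amdgpu_dm_commit_audio() handles both device removals and additions for
 * the connectors touched by this commit.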
*/ 9915 amdgpu_dm_commit_audio(dev, state); 9916 9917 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ 9918 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 9919 /* restore the backlight level */ 9920 for (i = 0; i < dm->num_of_edps; i++) { 9921 if (dm->backlight_dev[i] && 9922 (dm->actual_brightness[i] != dm->brightness[i])) 9923 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 9924 } 9925 #endif 9926 /* 9927 * send vblank event on all events not handled in flip and 9928 * mark consumed event for drm_atomic_helper_commit_hw_done 9929 */ 9930 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 9931 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 9932 9933 if (new_crtc_state->event) 9934 drm_send_event_locked(dev, &new_crtc_state->event->base); 9935 9936 new_crtc_state->event = NULL; 9937 } 9938 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9939 9940 /* Signal HW programming completion */ 9941 drm_atomic_helper_commit_hw_done(state); 9942 9943 if (wait_for_vblank) 9944 drm_atomic_helper_wait_for_flip_done(dev, state); 9945 9946 drm_atomic_helper_cleanup_planes(dev, state); 9947 9948 /* return the stolen vga memory back to VRAM */ 9949 if (!adev->mman.keep_stolen_vga_memory) 9950 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); 9951 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); 9952 9953 /* 9954 * Finally, drop a runtime PM reference for each newly disabled CRTC, 9955 * so we can put the GPU into runtime suspend if we're not driving any 9956 * displays anymore 9957 */ 9958 for (i = 0; i < crtc_disable_count; i++) 9959 pm_runtime_put_autosuspend(dev->dev); 9960 pm_runtime_mark_last_busy(dev->dev); 9961 9962 if (dc_state_temp) 9963 dc_release_state(dc_state_temp); 9964 } 9965 9966 9967 static int dm_force_atomic_commit(struct drm_connector *connector) 9968 { 9969 int ret = 0; 9970 struct drm_device *ddev = connector->dev; 9971 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); 9972 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 9973 struct drm_plane *plane = disconnected_acrtc->base.primary; 9974 struct drm_connector_state *conn_state; 9975 struct drm_crtc_state *crtc_state; 9976 struct drm_plane_state *plane_state; 9977 9978 if (!state) 9979 return -ENOMEM; 9980 9981 state->acquire_ctx = ddev->mode_config.acquire_ctx; 9982 9983 /* Construct an atomic state to restore previous display setting */ 9984 9985 /* 9986 * Attach connectors to drm_atomic_state 9987 */ 9988 conn_state = drm_atomic_get_connector_state(state, connector); 9989 9990 ret = PTR_ERR_OR_ZERO(conn_state); 9991 if (ret) 9992 goto out; 9993 9994 /* Attach crtc to drm_atomic_state*/ 9995 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); 9996 9997 ret = PTR_ERR_OR_ZERO(crtc_state); 9998 if (ret) 9999 goto out; 10000 10001 /* force a restore */ 10002 crtc_state->mode_changed = true; 10003 10004 /* Attach plane to drm_atomic_state */ 10005 plane_state = drm_atomic_get_plane_state(state, plane); 10006 10007 ret = PTR_ERR_OR_ZERO(plane_state); 10008 if (ret) 10009 goto out; 10010 10011 /* Call commit internally with the state we just constructed */ 10012 ret = drm_atomic_commit(state); 10013 10014 out: 10015 drm_atomic_state_put(state); 10016 if (ret) 10017 DRM_ERROR("Restoring old state failed with %i\n", ret); 10018 10019 return ret; 10020 } 10021 10022 /* 10023 * This function handles all cases when set mode does not come upon hotplug. 
10024 * This includes when a display is unplugged then plugged back into the 10025 * same port and when running without usermode desktop manager support 10026 */ 10027 void dm_restore_drm_connector_state(struct drm_device *dev, 10028 struct drm_connector *connector) 10029 { 10030 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 10031 struct amdgpu_crtc *disconnected_acrtc; 10032 struct dm_crtc_state *acrtc_state; 10033 10034 if (!aconnector->dc_sink || !connector->state || !connector->encoder) 10035 return; 10036 10037 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 10038 if (!disconnected_acrtc) 10039 return; 10040 10041 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state); 10042 if (!acrtc_state->stream) 10043 return; 10044 10045 /* 10046 * If the previous sink is not released and different from the current, 10047 * we deduce we are in a state where we cannot rely on a usermode call 10048 * to turn on the display, so we do it here 10049 */ 10050 if (acrtc_state->stream->sink != aconnector->dc_sink) 10051 dm_force_atomic_commit(&aconnector->base); 10052 } 10053 10054 /* 10055 * Grabs all modesetting locks to serialize against any blocking commits and 10056 * waits for completion of all non-blocking commits. 10057 */ 10058 static int do_aquire_global_lock(struct drm_device *dev, 10059 struct drm_atomic_state *state) 10060 { 10061 struct drm_crtc *crtc; 10062 struct drm_crtc_commit *commit; 10063 long ret; 10064 10065 /* 10066 * Adding all modeset locks to acquire_ctx will 10067 * ensure that when the framework releases it, the 10068 * extra locks we are locking here will get released too 10069 */ 10070 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); 10071 if (ret) 10072 return ret; 10073 10074 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 10075 spin_lock(&crtc->commit_lock); 10076 commit = list_first_entry_or_null(&crtc->commit_list, 10077 struct drm_crtc_commit, commit_entry); 10078 if (commit) 10079 drm_crtc_commit_get(commit); 10080 spin_unlock(&crtc->commit_lock); 10081 10082 if (!commit) 10083 continue; 10084 10085 /* 10086 * Make sure all pending HW programming completed and 10087 * page flips done 10088 */ 10089 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ); 10090 10091 if (ret > 0) 10092 ret = wait_for_completion_interruptible_timeout( 10093 &commit->flip_done, 10*HZ); 10094 10095 if (ret == 0) 10096 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done " 10097 "timed out\n", crtc->base.id, crtc->name); 10098 10099 drm_crtc_commit_put(commit); 10100 } 10101 10102 return ret < 0 ?
ret : 0; 10103 } 10104 10105 static void get_freesync_config_for_crtc( 10106 struct dm_crtc_state *new_crtc_state, 10107 struct dm_connector_state *new_con_state) 10108 { 10109 struct mod_freesync_config config = {0}; 10110 struct amdgpu_dm_connector *aconnector = 10111 to_amdgpu_dm_connector(new_con_state->base.connector); 10112 struct drm_display_mode *mode = &new_crtc_state->base.mode; 10113 int vrefresh = drm_mode_vrefresh(mode); 10114 bool fs_vid_mode = false; 10115 10116 new_crtc_state->vrr_supported = new_con_state->freesync_capable && 10117 vrefresh >= aconnector->min_vfreq && 10118 vrefresh <= aconnector->max_vfreq; 10119 10120 if (new_crtc_state->vrr_supported) { 10121 new_crtc_state->stream->ignore_msa_timing_param = true; 10122 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; 10123 10124 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000; 10125 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; 10126 config.vsif_supported = true; 10127 config.btr = true; 10128 10129 if (fs_vid_mode) { 10130 config.state = VRR_STATE_ACTIVE_FIXED; 10131 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz; 10132 goto out; 10133 } else if (new_crtc_state->base.vrr_enabled) { 10134 config.state = VRR_STATE_ACTIVE_VARIABLE; 10135 } else { 10136 config.state = VRR_STATE_INACTIVE; 10137 } 10138 } 10139 out: 10140 new_crtc_state->freesync_config = config; 10141 } 10142 10143 static void reset_freesync_config_for_crtc( 10144 struct dm_crtc_state *new_crtc_state) 10145 { 10146 new_crtc_state->vrr_supported = false; 10147 10148 memset(&new_crtc_state->vrr_infopacket, 0, 10149 sizeof(new_crtc_state->vrr_infopacket)); 10150 } 10151 10152 static bool 10153 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, 10154 struct drm_crtc_state *new_crtc_state) 10155 { 10156 const struct drm_display_mode *old_mode, *new_mode; 10157 10158 if (!old_crtc_state || !new_crtc_state) 10159 return false; 10160 10161 old_mode = &old_crtc_state->mode; 10162 new_mode = &new_crtc_state->mode; 10163 10164 if (old_mode->clock == new_mode->clock && 10165 old_mode->hdisplay == new_mode->hdisplay && 10166 old_mode->vdisplay == new_mode->vdisplay && 10167 old_mode->htotal == new_mode->htotal && 10168 old_mode->vtotal != new_mode->vtotal && 10169 old_mode->hsync_start == new_mode->hsync_start && 10170 old_mode->vsync_start != new_mode->vsync_start && 10171 old_mode->hsync_end == new_mode->hsync_end && 10172 old_mode->vsync_end != new_mode->vsync_end && 10173 old_mode->hskew == new_mode->hskew && 10174 old_mode->vscan == new_mode->vscan && 10175 (old_mode->vsync_end - old_mode->vsync_start) == 10176 (new_mode->vsync_end - new_mode->vsync_start)) 10177 return true; 10178 10179 return false; 10180 } 10181 10182 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { 10183 uint64_t num, den, res; 10184 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; 10185 10186 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; 10187 10188 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000; 10189 den = (unsigned long long)new_crtc_state->mode.htotal * 10190 (unsigned long long)new_crtc_state->mode.vtotal; 10191 10192 res = div_u64(num, den); 10193 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res; 10194 } 10195 10196 static int dm_update_crtc_state(struct amdgpu_display_manager *dm, 10197 struct drm_atomic_state *state, 10198 struct drm_crtc *crtc, 10199 struct 
drm_crtc_state *old_crtc_state, 10200 struct drm_crtc_state *new_crtc_state, 10201 bool enable, 10202 bool *lock_and_validation_needed) 10203 { 10204 struct dm_atomic_state *dm_state = NULL; 10205 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 10206 struct dc_stream_state *new_stream; 10207 int ret = 0; 10208 10209 /* 10210 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set 10211 * update changed items 10212 */ 10213 struct amdgpu_crtc *acrtc = NULL; 10214 struct amdgpu_dm_connector *aconnector = NULL; 10215 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; 10216 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; 10217 10218 new_stream = NULL; 10219 10220 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10221 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10222 acrtc = to_amdgpu_crtc(crtc); 10223 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 10224 10225 /* TODO This hack should go away */ 10226 if (aconnector && enable) { 10227 /* Make sure fake sink is created in plug-in scenario */ 10228 drm_new_conn_state = drm_atomic_get_new_connector_state(state, 10229 &aconnector->base); 10230 drm_old_conn_state = drm_atomic_get_old_connector_state(state, 10231 &aconnector->base); 10232 10233 if (IS_ERR(drm_new_conn_state)) { 10234 ret = PTR_ERR_OR_ZERO(drm_new_conn_state); 10235 goto fail; 10236 } 10237 10238 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); 10239 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); 10240 10241 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10242 goto skip_modeset; 10243 10244 new_stream = create_validate_stream_for_sink(aconnector, 10245 &new_crtc_state->mode, 10246 dm_new_conn_state, 10247 dm_old_crtc_state->stream); 10248 10249 /* 10250 * we can have no stream on ACTION_SET if a display 10251 * was disconnected during S3, in this case it is not an 10252 * error, the OS will be updated after detection, and 10253 * will do the right thing on next atomic commit 10254 */ 10255 10256 if (!new_stream) { 10257 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 10258 __func__, acrtc->base.base.id); 10259 ret = -ENOMEM; 10260 goto fail; 10261 } 10262 10263 /* 10264 * TODO: Check VSDB bits to decide whether this should 10265 * be enabled or not. 10266 */ 10267 new_stream->triggered_crtc_reset.enabled = 10268 dm->force_timing_sync; 10269 10270 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 10271 10272 ret = fill_hdr_info_packet(drm_new_conn_state, 10273 &new_stream->hdr_static_metadata); 10274 if (ret) 10275 goto fail; 10276 10277 /* 10278 * If we already removed the old stream from the context 10279 * (and set the new stream to NULL) then we can't reuse 10280 * the old stream even if the stream and scaling are unchanged. 10281 * We'll hit the BUG_ON and black screen. 10282 * 10283 * TODO: Refactor this function to allow this check to work 10284 * in all conditions. 
10285 */ 10286 if (dm_new_crtc_state->stream && 10287 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) 10288 goto skip_modeset; 10289 10290 if (dm_new_crtc_state->stream && 10291 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 10292 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 10293 new_crtc_state->mode_changed = false; 10294 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", 10295 new_crtc_state->mode_changed); 10296 } 10297 } 10298 10299 /* mode_changed flag may get updated above, need to check again */ 10300 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10301 goto skip_modeset; 10302 10303 drm_dbg_state(state->dev, 10304 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " 10305 "planes_changed:%d, mode_changed:%d,active_changed:%d," 10306 "connectors_changed:%d\n", 10307 acrtc->crtc_id, 10308 new_crtc_state->enable, 10309 new_crtc_state->active, 10310 new_crtc_state->planes_changed, 10311 new_crtc_state->mode_changed, 10312 new_crtc_state->active_changed, 10313 new_crtc_state->connectors_changed); 10314 10315 /* Remove stream for any changed/disabled CRTC */ 10316 if (!enable) { 10317 10318 if (!dm_old_crtc_state->stream) 10319 goto skip_modeset; 10320 10321 if (dm_new_crtc_state->stream && 10322 is_timing_unchanged_for_freesync(new_crtc_state, 10323 old_crtc_state)) { 10324 new_crtc_state->mode_changed = false; 10325 DRM_DEBUG_DRIVER( 10326 "Mode change not required for front porch change, " 10327 "setting mode_changed to %d", 10328 new_crtc_state->mode_changed); 10329 10330 set_freesync_fixed_config(dm_new_crtc_state); 10331 10332 goto skip_modeset; 10333 } else if (aconnector && 10334 is_freesync_video_mode(&new_crtc_state->mode, 10335 aconnector)) { 10336 struct drm_display_mode *high_mode; 10337 10338 high_mode = get_highest_refresh_rate_mode(aconnector, false); 10339 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) { 10340 set_freesync_fixed_config(dm_new_crtc_state); 10341 } 10342 } 10343 10344 ret = dm_atomic_get_state(state, &dm_state); 10345 if (ret) 10346 goto fail; 10347 10348 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", 10349 crtc->base.id); 10350 10351 /* i.e. 
reset mode */ 10352 if (dc_remove_stream_from_ctx( 10353 dm->dc, 10354 dm_state->context, 10355 dm_old_crtc_state->stream) != DC_OK) { 10356 ret = -EINVAL; 10357 goto fail; 10358 } 10359 10360 dc_stream_release(dm_old_crtc_state->stream); 10361 dm_new_crtc_state->stream = NULL; 10362 10363 reset_freesync_config_for_crtc(dm_new_crtc_state); 10364 10365 *lock_and_validation_needed = true; 10366 10367 } else {/* Add stream for any updated/enabled CRTC */ 10368 /* 10369 * Quick fix to prevent NULL pointer on new_stream when 10370 * added MST connectors not found in existing crtc_state in the chained mode 10371 * TODO: need to dig out the root cause of that 10372 */ 10373 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port)) 10374 goto skip_modeset; 10375 10376 if (modereset_required(new_crtc_state)) 10377 goto skip_modeset; 10378 10379 if (modeset_required(new_crtc_state, new_stream, 10380 dm_old_crtc_state->stream)) { 10381 10382 WARN_ON(dm_new_crtc_state->stream); 10383 10384 ret = dm_atomic_get_state(state, &dm_state); 10385 if (ret) 10386 goto fail; 10387 10388 dm_new_crtc_state->stream = new_stream; 10389 10390 dc_stream_retain(new_stream); 10391 10392 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n", 10393 crtc->base.id); 10394 10395 if (dc_add_stream_to_ctx( 10396 dm->dc, 10397 dm_state->context, 10398 dm_new_crtc_state->stream) != DC_OK) { 10399 ret = -EINVAL; 10400 goto fail; 10401 } 10402 10403 *lock_and_validation_needed = true; 10404 } 10405 } 10406 10407 skip_modeset: 10408 /* Release extra reference */ 10409 if (new_stream) 10410 dc_stream_release(new_stream); 10411 10412 /* 10413 * We want to do dc stream updates that do not require a 10414 * full modeset below. 10415 */ 10416 if (!(enable && aconnector && new_crtc_state->active)) 10417 return 0; 10418 /* 10419 * Given above conditions, the dc state cannot be NULL because: 10420 * 1. We're in the process of enabling CRTCs (just been added 10421 * to the dc context, or already is on the context) 10422 * 2. Has a valid connector attached, and 10423 * 3. Is currently active and enabled. 10424 * => The dc stream state currently exists. 10425 */ 10426 BUG_ON(dm_new_crtc_state->stream == NULL); 10427 10428 /* Scaling or underscan settings */ 10429 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) || 10430 drm_atomic_crtc_needs_modeset(new_crtc_state)) 10431 update_stream_scaling_settings( 10432 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream); 10433 10434 /* ABM settings */ 10435 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 10436 10437 /* 10438 * Color management settings. We also update color properties 10439 * when a modeset is needed, to ensure it gets reprogrammed. 10440 */ 10441 if (dm_new_crtc_state->base.color_mgmt_changed || 10442 drm_atomic_crtc_needs_modeset(new_crtc_state)) { 10443 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); 10444 if (ret) 10445 goto fail; 10446 } 10447 10448 /* Update Freesync settings. 
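 * get_freesync_config_for_crtc() derives the VRR range from the
 * connector's reported min/max refresh rate and stores the resulting
 * freesync config in the new CRTC state.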
*/ 10449 get_freesync_config_for_crtc(dm_new_crtc_state, 10450 dm_new_conn_state); 10451 10452 return ret; 10453 10454 fail: 10455 if (new_stream) 10456 dc_stream_release(new_stream); 10457 return ret; 10458 } 10459 10460 static bool should_reset_plane(struct drm_atomic_state *state, 10461 struct drm_plane *plane, 10462 struct drm_plane_state *old_plane_state, 10463 struct drm_plane_state *new_plane_state) 10464 { 10465 struct drm_plane *other; 10466 struct drm_plane_state *old_other_state, *new_other_state; 10467 struct drm_crtc_state *new_crtc_state; 10468 int i; 10469 10470 /* 10471 * TODO: Remove this hack once the checks below are sufficient 10472 * enough to determine when we need to reset all the planes on 10473 * the stream. 10474 */ 10475 if (state->allow_modeset) 10476 return true; 10477 10478 /* Exit early if we know that we're adding or removing the plane. */ 10479 if (old_plane_state->crtc != new_plane_state->crtc) 10480 return true; 10481 10482 /* old crtc == new_crtc == NULL, plane not in context. */ 10483 if (!new_plane_state->crtc) 10484 return false; 10485 10486 new_crtc_state = 10487 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); 10488 10489 if (!new_crtc_state) 10490 return true; 10491 10492 /* CRTC Degamma changes currently require us to recreate planes. */ 10493 if (new_crtc_state->color_mgmt_changed) 10494 return true; 10495 10496 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) 10497 return true; 10498 10499 /* 10500 * If there are any new primary or overlay planes being added or 10501 * removed then the z-order can potentially change. To ensure 10502 * correct z-order and pipe acquisition the current DC architecture 10503 * requires us to remove and recreate all existing planes. 10504 * 10505 * TODO: Come up with a more elegant solution for this. 10506 */ 10507 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { 10508 struct amdgpu_framebuffer *old_afb, *new_afb; 10509 if (other->type == DRM_PLANE_TYPE_CURSOR) 10510 continue; 10511 10512 if (old_other_state->crtc != new_plane_state->crtc && 10513 new_other_state->crtc != new_plane_state->crtc) 10514 continue; 10515 10516 if (old_other_state->crtc != new_other_state->crtc) 10517 return true; 10518 10519 /* Src/dst size and scaling updates. */ 10520 if (old_other_state->src_w != new_other_state->src_w || 10521 old_other_state->src_h != new_other_state->src_h || 10522 old_other_state->crtc_w != new_other_state->crtc_w || 10523 old_other_state->crtc_h != new_other_state->crtc_h) 10524 return true; 10525 10526 /* Rotation / mirroring updates. */ 10527 if (old_other_state->rotation != new_other_state->rotation) 10528 return true; 10529 10530 /* Blending updates. */ 10531 if (old_other_state->pixel_blend_mode != 10532 new_other_state->pixel_blend_mode) 10533 return true; 10534 10535 /* Alpha updates. */ 10536 if (old_other_state->alpha != new_other_state->alpha) 10537 return true; 10538 10539 /* Colorspace changes. */ 10540 if (old_other_state->color_range != new_other_state->color_range || 10541 old_other_state->color_encoding != new_other_state->color_encoding) 10542 return true; 10543 10544 /* Framebuffer checks fall at the end. */ 10545 if (!old_other_state->fb || !new_other_state->fb) 10546 continue; 10547 10548 /* Pixel format changes can require bandwidth updates. 
*/ 10549 if (old_other_state->fb->format != new_other_state->fb->format) 10550 return true; 10551 10552 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb; 10553 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb; 10554 10555 /* Tiling and DCC changes also require bandwidth updates. */ 10556 if (old_afb->tiling_flags != new_afb->tiling_flags || 10557 old_afb->base.modifier != new_afb->base.modifier) 10558 return true; 10559 } 10560 10561 return false; 10562 } 10563 10564 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, 10565 struct drm_plane_state *new_plane_state, 10566 struct drm_framebuffer *fb) 10567 { 10568 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev); 10569 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); 10570 unsigned int pitch; 10571 bool linear; 10572 10573 if (fb->width > new_acrtc->max_cursor_width || 10574 fb->height > new_acrtc->max_cursor_height) { 10575 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n", 10576 new_plane_state->fb->width, 10577 new_plane_state->fb->height); 10578 return -EINVAL; 10579 } 10580 if (new_plane_state->src_w != fb->width << 16 || 10581 new_plane_state->src_h != fb->height << 16) { 10582 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 10583 return -EINVAL; 10584 } 10585 10586 /* Pitch in pixels */ 10587 pitch = fb->pitches[0] / fb->format->cpp[0]; 10588 10589 if (fb->width != pitch) { 10590 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d", 10591 fb->width, pitch); 10592 return -EINVAL; 10593 } 10594 10595 switch (pitch) { 10596 case 64: 10597 case 128: 10598 case 256: 10599 /* FB pitch is supported by cursor plane */ 10600 break; 10601 default: 10602 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch); 10603 return -EINVAL; 10604 } 10605 10606 /* Core DRM takes care of checking FB modifiers, so we only need to 10607 * check tiling flags when the FB doesn't have a modifier. 
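 * Pre-AI (DCE) parts are validated against the legacy GFX tiling bits,
 * while GFX9 and newer only need SWIZZLE_MODE to indicate a linear surface.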
*/ 10608 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { 10609 if (adev->family < AMDGPU_FAMILY_AI) { 10610 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && 10611 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && 10612 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; 10613 } else { 10614 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; 10615 } 10616 if (!linear) { 10617 DRM_DEBUG_ATOMIC("Cursor FB not linear"); 10618 return -EINVAL; 10619 } 10620 } 10621 10622 return 0; 10623 } 10624 10625 static int dm_update_plane_state(struct dc *dc, 10626 struct drm_atomic_state *state, 10627 struct drm_plane *plane, 10628 struct drm_plane_state *old_plane_state, 10629 struct drm_plane_state *new_plane_state, 10630 bool enable, 10631 bool *lock_and_validation_needed) 10632 { 10633 10634 struct dm_atomic_state *dm_state = NULL; 10635 struct drm_crtc *new_plane_crtc, *old_plane_crtc; 10636 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10637 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; 10638 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; 10639 struct amdgpu_crtc *new_acrtc; 10640 bool needs_reset; 10641 int ret = 0; 10642 10643 10644 new_plane_crtc = new_plane_state->crtc; 10645 old_plane_crtc = old_plane_state->crtc; 10646 dm_new_plane_state = to_dm_plane_state(new_plane_state); 10647 dm_old_plane_state = to_dm_plane_state(old_plane_state); 10648 10649 if (plane->type == DRM_PLANE_TYPE_CURSOR) { 10650 if (!enable || !new_plane_crtc || 10651 drm_atomic_plane_disabling(plane->state, new_plane_state)) 10652 return 0; 10653 10654 new_acrtc = to_amdgpu_crtc(new_plane_crtc); 10655 10656 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { 10657 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 10658 return -EINVAL; 10659 } 10660 10661 if (new_plane_state->fb) { 10662 ret = dm_check_cursor_fb(new_acrtc, new_plane_state, 10663 new_plane_state->fb); 10664 if (ret) 10665 return ret; 10666 } 10667 10668 return 0; 10669 } 10670 10671 needs_reset = should_reset_plane(state, plane, old_plane_state, 10672 new_plane_state); 10673 10674 /* Remove any changed/removed planes */ 10675 if (!enable) { 10676 if (!needs_reset) 10677 return 0; 10678 10679 if (!old_plane_crtc) 10680 return 0; 10681 10682 old_crtc_state = drm_atomic_get_old_crtc_state( 10683 state, old_plane_crtc); 10684 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10685 10686 if (!dm_old_crtc_state->stream) 10687 return 0; 10688 10689 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", 10690 plane->base.id, old_plane_crtc->base.id); 10691 10692 ret = dm_atomic_get_state(state, &dm_state); 10693 if (ret) 10694 return ret; 10695 10696 if (!dc_remove_plane_from_context( 10697 dc, 10698 dm_old_crtc_state->stream, 10699 dm_old_plane_state->dc_state, 10700 dm_state->context)) { 10701 10702 return -EINVAL; 10703 } 10704 10705 10706 dc_plane_state_release(dm_old_plane_state->dc_state); 10707 dm_new_plane_state->dc_state = NULL; 10708 10709 *lock_and_validation_needed = true; 10710 10711 } else { /* Add new planes */ 10712 struct dc_plane_state *dc_new_plane_state; 10713 10714 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 10715 return 0; 10716 10717 if (!new_plane_crtc) 10718 return 0; 10719 10720 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); 10721 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10722 10723 if (!dm_new_crtc_state->stream) 10724 
return 0; 10725 10726 if (!needs_reset) 10727 return 0; 10728 10729 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state); 10730 if (ret) 10731 return ret; 10732 10733 WARN_ON(dm_new_plane_state->dc_state); 10734 10735 dc_new_plane_state = dc_create_plane_state(dc); 10736 if (!dc_new_plane_state) 10737 return -ENOMEM; 10738 10739 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", 10740 plane->base.id, new_plane_crtc->base.id); 10741 10742 ret = fill_dc_plane_attributes( 10743 drm_to_adev(new_plane_crtc->dev), 10744 dc_new_plane_state, 10745 new_plane_state, 10746 new_crtc_state); 10747 if (ret) { 10748 dc_plane_state_release(dc_new_plane_state); 10749 return ret; 10750 } 10751 10752 ret = dm_atomic_get_state(state, &dm_state); 10753 if (ret) { 10754 dc_plane_state_release(dc_new_plane_state); 10755 return ret; 10756 } 10757 10758 /* 10759 * Any atomic check errors that occur after this will 10760 * not need a release. The plane state will be attached 10761 * to the stream, and therefore part of the atomic 10762 * state. It'll be released when the atomic state is 10763 * cleaned. 10764 */ 10765 if (!dc_add_plane_to_context( 10766 dc, 10767 dm_new_crtc_state->stream, 10768 dc_new_plane_state, 10769 dm_state->context)) { 10770 10771 dc_plane_state_release(dc_new_plane_state); 10772 return -EINVAL; 10773 } 10774 10775 dm_new_plane_state->dc_state = dc_new_plane_state; 10776 10777 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY); 10778 10779 /* Tell DC to do a full surface update every time there 10780 * is a plane change. Inefficient, but works for now. 10781 */ 10782 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; 10783 10784 *lock_and_validation_needed = true; 10785 } 10786 10787 10788 return ret; 10789 } 10790 10791 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, 10792 int *src_w, int *src_h) 10793 { 10794 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 10795 case DRM_MODE_ROTATE_90: 10796 case DRM_MODE_ROTATE_270: 10797 *src_w = plane_state->src_h >> 16; 10798 *src_h = plane_state->src_w >> 16; 10799 break; 10800 case DRM_MODE_ROTATE_0: 10801 case DRM_MODE_ROTATE_180: 10802 default: 10803 *src_w = plane_state->src_w >> 16; 10804 *src_h = plane_state->src_h >> 16; 10805 break; 10806 } 10807 } 10808 10809 static int dm_check_crtc_cursor(struct drm_atomic_state *state, 10810 struct drm_crtc *crtc, 10811 struct drm_crtc_state *new_crtc_state) 10812 { 10813 struct drm_plane *cursor = crtc->cursor, *underlying; 10814 struct drm_plane_state *new_cursor_state, *new_underlying_state; 10815 int i; 10816 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h; 10817 int cursor_src_w, cursor_src_h; 10818 int underlying_src_w, underlying_src_h; 10819 10820 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a 10821 * cursor per pipe but it's going to inherit the scaling and 10822 * positioning from the underlying pipe. Check the cursor plane's 10823 * blending properties match the underlying planes'. 
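* Concretely, the loop below compares the effective scale factors (crtc size * over src size, in 1/1000 units) of the cursor against each enabled * non-cursor plane on this CRTC (stopping once a plane fully covers the CRTC) * and rejects the commit on a mismatch.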
*/ 10824 10825 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor); 10826 if (!new_cursor_state || !new_cursor_state->fb) { 10827 return 0; 10828 } 10829 10830 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h); 10831 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w; 10832 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h; 10833 10834 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { 10835 /* Narrow down to non-cursor planes on the same CRTC as the cursor */ 10836 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor) 10837 continue; 10838 10839 /* Ignore disabled planes */ 10840 if (!new_underlying_state->fb) 10841 continue; 10842 10843 dm_get_oriented_plane_size(new_underlying_state, 10844 &underlying_src_w, &underlying_src_h); 10845 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w; 10846 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h; 10847 10848 if (cursor_scale_w != underlying_scale_w || 10849 cursor_scale_h != underlying_scale_h) { 10850 drm_dbg_atomic(crtc->dev, 10851 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n", 10852 cursor->base.id, cursor->name, underlying->base.id, underlying->name); 10853 return -EINVAL; 10854 } 10855 10856 /* If this plane covers the whole CRTC, no need to check planes underneath */ 10857 if (new_underlying_state->crtc_x <= 0 && 10858 new_underlying_state->crtc_y <= 0 && 10859 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay && 10860 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay) 10861 break; 10862 } 10863 10864 return 0; 10865 } 10866 10867 #if defined(CONFIG_DRM_AMD_DC_DCN) 10868 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) 10869 { 10870 struct drm_connector *connector; 10871 struct drm_connector_state *conn_state, *old_conn_state; 10872 struct amdgpu_dm_connector *aconnector = NULL; 10873 int i; 10874 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { 10875 if (!conn_state->crtc) 10876 conn_state = old_conn_state; 10877 10878 if (conn_state->crtc != crtc) 10879 continue; 10880 10881 aconnector = to_amdgpu_dm_connector(connector); 10882 if (!aconnector->port || !aconnector->mst_port) 10883 aconnector = NULL; 10884 else 10885 break; 10886 } 10887 10888 if (!aconnector) 10889 return 0; 10890 10891 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr); 10892 } 10893 #endif 10894 10895 /** 10896 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. 10897 * @dev: The DRM device 10898 * @state: The atomic state to commit 10899 * 10900 * Validate that the given atomic state is programmable by DC into hardware. 10901 * This involves constructing a &struct dc_state reflecting the new hardware 10902 * state we wish to commit, then querying DC to see if it is programmable. It's 10903 * important not to modify the existing DC state. Otherwise, atomic_check 10904 * may unexpectedly commit hardware changes. 10905 * 10906 * When validating the DC state, it's important that the right locks are 10907 * acquired. 
For the full update case, which removes/adds/updates streams on one 10908 * CRTC while flipping on another CRTC, acquiring the global lock guarantees 10909 * that any such full update commit will wait for completion of any outstanding 10910 * flip using DRM's synchronization events. 10911 * 10912 * Note that DM adds the affected connectors for all CRTCs in state, even when that 10913 * might not seem necessary. This is because DC stream creation requires the 10914 * DC sink, which is tied to the DRM connector state. Cleaning this up should 10915 * be possible but non-trivial - a possible TODO item. 10916 * 10917 * Return: 0 on success, or a negative error code if validation failed. 10918 */ 10919 static int amdgpu_dm_atomic_check(struct drm_device *dev, 10920 struct drm_atomic_state *state) 10921 { 10922 struct amdgpu_device *adev = drm_to_adev(dev); 10923 struct dm_atomic_state *dm_state = NULL; 10924 struct dc *dc = adev->dm.dc; 10925 struct drm_connector *connector; 10926 struct drm_connector_state *old_con_state, *new_con_state; 10927 struct drm_crtc *crtc; 10928 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10929 struct drm_plane *plane; 10930 struct drm_plane_state *old_plane_state, *new_plane_state; 10931 enum dc_status status; 10932 int ret, i; 10933 bool lock_and_validation_needed = false; 10934 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 10935 #if defined(CONFIG_DRM_AMD_DC_DCN) 10936 struct dsc_mst_fairness_vars vars[MAX_PIPES]; 10937 struct drm_dp_mst_topology_state *mst_state; 10938 struct drm_dp_mst_topology_mgr *mgr; 10939 #endif 10940 10941 trace_amdgpu_dm_atomic_check_begin(state); 10942 10943 ret = drm_atomic_helper_check_modeset(dev, state); 10944 if (ret) { 10945 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n"); 10946 goto fail; 10947 } 10948 10949 /* Check connector changes */ 10950 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 10951 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 10952 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 10953 10954 /* Skip connectors that are disabled or part of modeset already.
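* A connector with no CRTC in either the old or the new state is not touched * by this commit, and a connector being disabled (new CRTC == NULL) is already * handled through the modeset on its old CRTC.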
*/ 10955 if (!old_con_state->crtc && !new_con_state->crtc) 10956 continue; 10957 10958 if (!new_con_state->crtc) 10959 continue; 10960 10961 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); 10962 if (IS_ERR(new_crtc_state)) { 10963 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n"); 10964 ret = PTR_ERR(new_crtc_state); 10965 goto fail; 10966 } 10967 10968 if (dm_old_con_state->abm_level != 10969 dm_new_con_state->abm_level) 10970 new_crtc_state->connectors_changed = true; 10971 } 10972 10973 #if defined(CONFIG_DRM_AMD_DC_DCN) 10974 if (dc_resource_is_dsc_encoding_supported(dc)) { 10975 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10976 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { 10977 ret = add_affected_mst_dsc_crtcs(state, crtc); 10978 if (ret) { 10979 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n"); 10980 goto fail; 10981 } 10982 } 10983 } 10984 pre_validate_dsc(state, &dm_state, vars); 10985 } 10986 #endif 10987 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10988 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10989 10990 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 10991 !new_crtc_state->color_mgmt_changed && 10992 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled && 10993 dm_old_crtc_state->dsc_force_changed == false) 10994 continue; 10995 10996 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); 10997 if (ret) { 10998 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n"); 10999 goto fail; 11000 } 11001 11002 if (!new_crtc_state->enable) 11003 continue; 11004 11005 ret = drm_atomic_add_affected_connectors(state, crtc); 11006 if (ret) { 11007 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n"); 11008 goto fail; 11009 } 11010 11011 ret = drm_atomic_add_affected_planes(state, crtc); 11012 if (ret) { 11013 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n"); 11014 goto fail; 11015 } 11016 11017 if (dm_old_crtc_state->dsc_force_changed) 11018 new_crtc_state->mode_changed = true; 11019 } 11020 11021 /* 11022 * Add all primary and overlay planes on the CRTC to the state 11023 * whenever a plane is enabled to maintain correct z-ordering 11024 * and to enable fast surface updates. 
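* Cursor planes are skipped by these loops; their scaling constraints are * checked separately, per CRTC, in dm_check_crtc_cursor().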
11025 */ 11026 drm_for_each_crtc(crtc, dev) { 11027 bool modified = false; 11028 11029 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 11030 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11031 continue; 11032 11033 if (new_plane_state->crtc == crtc || 11034 old_plane_state->crtc == crtc) { 11035 modified = true; 11036 break; 11037 } 11038 } 11039 11040 if (!modified) 11041 continue; 11042 11043 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 11044 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11045 continue; 11046 11047 new_plane_state = 11048 drm_atomic_get_plane_state(state, plane); 11049 11050 if (IS_ERR(new_plane_state)) { 11051 ret = PTR_ERR(new_plane_state); 11052 DRM_DEBUG_DRIVER("new_plane_state is BAD\n"); 11053 goto fail; 11054 } 11055 } 11056 } 11057 11058 /* Remove existing planes if they are modified */ 11059 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { 11060 ret = dm_update_plane_state(dc, state, plane, 11061 old_plane_state, 11062 new_plane_state, 11063 false, 11064 &lock_and_validation_needed); 11065 if (ret) { 11066 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); 11067 goto fail; 11068 } 11069 } 11070 11071 /* Disable all crtcs which require disable */ 11072 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11073 ret = dm_update_crtc_state(&adev->dm, state, crtc, 11074 old_crtc_state, 11075 new_crtc_state, 11076 false, 11077 &lock_and_validation_needed); 11078 if (ret) { 11079 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n"); 11080 goto fail; 11081 } 11082 } 11083 11084 /* Enable all crtcs which require enable */ 11085 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11086 ret = dm_update_crtc_state(&adev->dm, state, crtc, 11087 old_crtc_state, 11088 new_crtc_state, 11089 true, 11090 &lock_and_validation_needed); 11091 if (ret) { 11092 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n"); 11093 goto fail; 11094 } 11095 } 11096 11097 /* Add new/modified planes */ 11098 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { 11099 ret = dm_update_plane_state(dc, state, plane, 11100 old_plane_state, 11101 new_plane_state, 11102 true, 11103 &lock_and_validation_needed); 11104 if (ret) { 11105 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); 11106 goto fail; 11107 } 11108 } 11109 11110 /* Run this here since we want to validate the streams we created */ 11111 ret = drm_atomic_helper_check_planes(dev, state); 11112 if (ret) { 11113 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n"); 11114 goto fail; 11115 } 11116 11117 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11118 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11119 if (dm_new_crtc_state->mpo_requested) 11120 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc); 11121 } 11122 11123 /* Check cursor planes scaling */ 11124 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11125 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); 11126 if (ret) { 11127 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n"); 11128 goto fail; 11129 } 11130 } 11131 11132 if (state->legacy_cursor_update) { 11133 /* 11134 * This is a fast cursor update coming from the plane update 11135 * helper; check if it can be done asynchronously for better 11136 * performance.
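* drm_atomic_helper_async_check() returns 0 when the update can be applied * asynchronously, hence the inverted result stored below.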
11137 */ 11138 state->async_update = 11139 !drm_atomic_helper_async_check(dev, state); 11140 11141 /* 11142 * Skip the remaining global validation if this is an async 11143 * update. Cursor updates can be done without affecting 11144 * state or bandwidth calcs, and this avoids the performance 11145 * penalty of locking the private state object and 11146 * allocating a new dc_state. 11147 */ 11148 if (state->async_update) 11149 return 0; 11150 } 11151 11152 /* Check scaling and underscan changes */ 11153 /* TODO: Removed scaling changes validation due to inability to commit 11154 * a new stream into the context w/o causing a full reset. Need to 11155 * decide how to handle. 11156 */ 11157 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 11158 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 11159 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 11160 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 11161 11162 /* Skip any modesets/resets */ 11163 if (!acrtc || drm_atomic_crtc_needs_modeset( 11164 drm_atomic_get_new_crtc_state(state, &acrtc->base))) 11165 continue; 11166 11167 /* Skip anything that is not a scale or underscan change */ 11168 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) 11169 continue; 11170 11171 lock_and_validation_needed = true; 11172 } 11173 11174 #if defined(CONFIG_DRM_AMD_DC_DCN) 11175 /* set the slot info for each mst_state based on the link encoding format */ 11176 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { 11177 struct amdgpu_dm_connector *aconnector; 11178 struct drm_connector *connector; 11179 struct drm_connector_list_iter iter; 11180 u8 link_coding_cap; 11181 11182 if (!mgr->mst_state) 11183 continue; 11184 11185 drm_connector_list_iter_begin(dev, &iter); 11186 drm_for_each_connector_iter(connector, &iter) { 11187 int id = connector->index; 11188 11189 if (id == mst_state->mgr->conn_base_id) { 11190 aconnector = to_amdgpu_dm_connector(connector); 11191 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); 11192 drm_dp_mst_update_slots(mst_state, link_coding_cap); 11193 11194 break; 11195 } 11196 } 11197 drm_connector_list_iter_end(&iter); 11198 11199 } 11200 #endif 11201 /* 11202 * Streams and planes are reset when there are changes that affect 11203 * bandwidth. Anything that affects bandwidth needs to go through 11204 * DC global validation to ensure that the configuration can be applied 11205 * to hardware. 11206 * 11207 * We currently have to stall out here in atomic_check for outstanding 11208 * commits to finish in this case because our IRQ handlers reference 11209 * DRM state directly - we can end up disabling interrupts too early 11210 * if we don't. 11211 * 11212 * TODO: Remove this stall and drop DM state private objects.
11213 */ 11214 if (lock_and_validation_needed) { 11215 ret = dm_atomic_get_state(state, &dm_state); 11216 if (ret) { 11217 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n"); 11218 goto fail; 11219 } 11220 11221 ret = do_aquire_global_lock(dev, state); 11222 if (ret) { 11223 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n"); 11224 goto fail; 11225 } 11226 11227 #if defined(CONFIG_DRM_AMD_DC_DCN) 11228 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) { 11229 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); 11230 goto fail; 11231 } 11232 11233 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); 11234 if (ret) { 11235 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n"); 11236 goto fail; 11237 } 11238 #endif 11239 11240 /* 11241 * Perform validation of MST topology in the state: 11242 * We need to perform MST atomic check before calling 11243 * dc_validate_global_state(), or there is a chance 11244 * to get stuck in an infinite loop and hang eventually. 11245 */ 11246 ret = drm_dp_mst_atomic_check(state); 11247 if (ret) { 11248 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n"); 11249 goto fail; 11250 } 11251 status = dc_validate_global_state(dc, dm_state->context, true); 11252 if (status != DC_OK) { 11253 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)", 11254 dc_status_to_str(status), status); 11255 ret = -EINVAL; 11256 goto fail; 11257 } 11258 } else { 11259 /* 11260 * The commit is a fast update. Fast updates shouldn't change 11261 * the DC context, affect global validation, and can have their 11262 * commit work done in parallel with other commits not touching 11263 * the same resource. If we have a new DC context as part of 11264 * the DM atomic state from validation we need to free it and 11265 * retain the existing one instead. 11266 * 11267 * Furthermore, since the DM atomic state only contains the DC 11268 * context and can safely be annulled, we can free the state 11269 * and clear the associated private object now to free 11270 * some memory and avoid a possible use-after-free later. 11271 */ 11272 11273 for (i = 0; i < state->num_private_objs; i++) { 11274 struct drm_private_obj *obj = state->private_objs[i].ptr; 11275 11276 if (obj->funcs == adev->dm.atomic_obj.funcs) { 11277 int j = state->num_private_objs-1; 11278 11279 dm_atomic_destroy_state(obj, 11280 state->private_objs[i].state); 11281 11282 /* If i is not at the end of the array then the 11283 * last element needs to be moved to where i was 11284 * before the array can safely be truncated. 11285 */ 11286 if (i != j) 11287 state->private_objs[i] = 11288 state->private_objs[j]; 11289 11290 state->private_objs[j].ptr = NULL; 11291 state->private_objs[j].state = NULL; 11292 state->private_objs[j].old_state = NULL; 11293 state->private_objs[j].new_state = NULL; 11294 11295 state->num_private_objs = j; 11296 break; 11297 } 11298 } 11299 } 11300 11301 /* Store the overall update type for use later in atomic check. */ 11302 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { 11303 struct dm_crtc_state *dm_new_crtc_state = 11304 to_dm_crtc_state(new_crtc_state); 11305 11306 dm_new_crtc_state->update_type = lock_and_validation_needed ? 
11307 UPDATE_TYPE_FULL : 11308 UPDATE_TYPE_FAST; 11309 } 11310 11311 /* Must be success */ 11312 WARN_ON(ret); 11313 11314 trace_amdgpu_dm_atomic_check_finish(state, ret); 11315 11316 return ret; 11317 11318 fail: 11319 if (ret == -EDEADLK) 11320 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n"); 11321 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) 11322 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n"); 11323 else 11324 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret); 11325 11326 trace_amdgpu_dm_atomic_check_finish(state, ret); 11327 11328 return ret; 11329 } 11330 11331 static bool is_dp_capable_without_timing_msa(struct dc *dc, 11332 struct amdgpu_dm_connector *amdgpu_dm_connector) 11333 { 11334 uint8_t dpcd_data; 11335 bool capable = false; 11336 11337 if (amdgpu_dm_connector->dc_link && 11338 dm_helpers_dp_read_dpcd( 11339 NULL, 11340 amdgpu_dm_connector->dc_link, 11341 DP_DOWN_STREAM_PORT_COUNT, 11342 &dpcd_data, 11343 sizeof(dpcd_data))) { 11344 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false; 11345 } 11346 11347 return capable; 11348 } 11349 11350 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, 11351 unsigned int offset, 11352 unsigned int total_length, 11353 uint8_t *data, 11354 unsigned int length, 11355 struct amdgpu_hdmi_vsdb_info *vsdb) 11356 { 11357 bool res; 11358 union dmub_rb_cmd cmd; 11359 struct dmub_cmd_send_edid_cea *input; 11360 struct dmub_cmd_edid_cea_output *output; 11361 11362 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES) 11363 return false; 11364 11365 memset(&cmd, 0, sizeof(cmd)); 11366 11367 input = &cmd.edid_cea.data.input; 11368 11369 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA; 11370 cmd.edid_cea.header.sub_type = 0; 11371 cmd.edid_cea.header.payload_bytes = 11372 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header); 11373 input->offset = offset; 11374 input->length = length; 11375 input->cea_total_length = total_length; 11376 memcpy(input->payload, data, length); 11377 11378 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd); 11379 if (!res) { 11380 DRM_ERROR("EDID CEA parser failed\n"); 11381 return false; 11382 } 11383 11384 output = &cmd.edid_cea.data.output; 11385 11386 if (output->type == DMUB_CMD__EDID_CEA_ACK) { 11387 if (!output->ack.success) { 11388 DRM_ERROR("EDID CEA ack failed at offset %d\n", 11389 output->ack.offset); 11390 } 11391 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) { 11392 if (!output->amd_vsdb.vsdb_found) 11393 return false; 11394 11395 vsdb->freesync_supported = output->amd_vsdb.freesync_supported; 11396 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version; 11397 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate; 11398 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate; 11399 } else { 11400 DRM_WARN("Unknown EDID CEA parser results\n"); 11401 return false; 11402 } 11403 11404 return true; 11405 } 11406 11407 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, 11408 uint8_t *edid_ext, int len, 11409 struct amdgpu_hdmi_vsdb_info *vsdb_info) 11410 { 11411 int i; 11412 11413 /* send extension block to DMCU for parsing */ 11414 for (i = 0; i < len; i += 8) { 11415 bool res; 11416 int offset; 11417 11418 /* send 8 bytes a time */ 11419 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8)) 11420 return false; 11421 11422 if (i+8 == len) { 11423 /* EDID block sent completed, expect result */ 11424 int version, min_rate, max_rate; 11425 11426 res = 
dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate); 11427 if (res) { 11428 /* amd vsdb found */ 11429 vsdb_info->freesync_supported = 1; 11430 vsdb_info->amd_vsdb_version = version; 11431 vsdb_info->min_refresh_rate_hz = min_rate; 11432 vsdb_info->max_refresh_rate_hz = max_rate; 11433 return true; 11434 } 11435 /* not amd vsdb */ 11436 return false; 11437 } 11438 11439 /* check for ack*/ 11440 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset); 11441 if (!res) 11442 return false; 11443 } 11444 11445 return false; 11446 } 11447 11448 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, 11449 uint8_t *edid_ext, int len, 11450 struct amdgpu_hdmi_vsdb_info *vsdb_info) 11451 { 11452 int i; 11453 11454 /* send extension block to DMCU for parsing */ 11455 for (i = 0; i < len; i += 8) { 11456 /* send 8 bytes a time */ 11457 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info)) 11458 return false; 11459 } 11460 11461 return vsdb_info->freesync_supported; 11462 } 11463 11464 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, 11465 uint8_t *edid_ext, int len, 11466 struct amdgpu_hdmi_vsdb_info *vsdb_info) 11467 { 11468 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); 11469 11470 if (adev->dm.dmub_srv) 11471 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); 11472 else 11473 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); 11474 } 11475 11476 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, 11477 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) 11478 { 11479 uint8_t *edid_ext = NULL; 11480 int i; 11481 bool valid_vsdb_found = false; 11482 11483 /*----- drm_find_cea_extension() -----*/ 11484 /* No EDID or EDID extensions */ 11485 if (edid == NULL || edid->extensions == 0) 11486 return -ENODEV; 11487 11488 /* Find CEA extension */ 11489 for (i = 0; i < edid->extensions; i++) { 11490 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1); 11491 if (edid_ext[0] == CEA_EXT) 11492 break; 11493 } 11494 11495 if (i == edid->extensions) 11496 return -ENODEV; 11497 11498 /*----- cea_db_offsets() -----*/ 11499 if (edid_ext[0] != CEA_EXT) 11500 return -ENODEV; 11501 11502 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info); 11503 11504 return valid_vsdb_found ? i : -ENODEV; 11505 } 11506 11507 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, 11508 struct edid *edid) 11509 { 11510 int i = 0; 11511 struct detailed_timing *timing; 11512 struct detailed_non_pixel *data; 11513 struct detailed_data_monitor_range *range; 11514 struct amdgpu_dm_connector *amdgpu_dm_connector = 11515 to_amdgpu_dm_connector(connector); 11516 struct dm_connector_state *dm_con_state = NULL; 11517 struct dc_sink *sink; 11518 11519 struct drm_device *dev = connector->dev; 11520 struct amdgpu_device *adev = drm_to_adev(dev); 11521 bool freesync_capable = false; 11522 struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; 11523 11524 if (!connector->state) { 11525 DRM_ERROR("%s - Connector has no state", __func__); 11526 goto update; 11527 } 11528 11529 sink = amdgpu_dm_connector->dc_sink ? 
11530 amdgpu_dm_connector->dc_sink : 11531 amdgpu_dm_connector->dc_em_sink; 11532 11533 if (!edid || !sink) { 11534 dm_con_state = to_dm_connector_state(connector->state); 11535 11536 amdgpu_dm_connector->min_vfreq = 0; 11537 amdgpu_dm_connector->max_vfreq = 0; 11538 amdgpu_dm_connector->pixel_clock_mhz = 0; 11539 connector->display_info.monitor_range.min_vfreq = 0; 11540 connector->display_info.monitor_range.max_vfreq = 0; 11541 freesync_capable = false; 11542 11543 goto update; 11544 } 11545 11546 dm_con_state = to_dm_connector_state(connector->state); 11547 11548 if (!adev->dm.freesync_module) 11549 goto update; 11550 11551 11552 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT 11553 || sink->sink_signal == SIGNAL_TYPE_EDP) { 11554 bool edid_check_required = false; 11555 11556 if (edid) { 11557 edid_check_required = is_dp_capable_without_timing_msa( 11558 adev->dm.dc, 11559 amdgpu_dm_connector); 11560 } 11561 11562 if (edid_check_required == true && (edid->version > 1 || 11563 (edid->version == 1 && edid->revision > 1))) { 11564 for (i = 0; i < 4; i++) { 11565 11566 timing = &edid->detailed_timings[i]; 11567 data = &timing->data.other_data; 11568 range = &data->data.range; 11569 /* 11570 * Check if monitor has continuous frequency mode 11571 */ 11572 if (data->type != EDID_DETAIL_MONITOR_RANGE) 11573 continue; 11574 /* 11575 * Check for flag range limits only. If flag == 1 then 11576 * no additional timing information provided. 11577 * Default GTF, GTF Secondary curve and CVT are not 11578 * supported 11579 */ 11580 if (range->flags != 1) 11581 continue; 11582 11583 amdgpu_dm_connector->min_vfreq = range->min_vfreq; 11584 amdgpu_dm_connector->max_vfreq = range->max_vfreq; 11585 amdgpu_dm_connector->pixel_clock_mhz = 11586 range->pixel_clock_mhz * 10; 11587 11588 connector->display_info.monitor_range.min_vfreq = range->min_vfreq; 11589 connector->display_info.monitor_range.max_vfreq = range->max_vfreq; 11590 11591 break; 11592 } 11593 11594 if (amdgpu_dm_connector->max_vfreq - 11595 amdgpu_dm_connector->min_vfreq > 10) { 11596 11597 freesync_capable = true; 11598 } 11599 } 11600 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { 11601 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 11602 if (i >= 0 && vsdb_info.freesync_supported) { 11603 timing = &edid->detailed_timings[i]; 11604 data = &timing->data.other_data; 11605 11606 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; 11607 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; 11608 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 11609 freesync_capable = true; 11610 11611 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; 11612 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; 11613 } 11614 } 11615 11616 update: 11617 if (dm_con_state) 11618 dm_con_state->freesync_capable = freesync_capable; 11619 11620 if (connector->vrr_capable_property) 11621 drm_connector_set_vrr_capable_property(connector, 11622 freesync_capable); 11623 } 11624 11625 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev) 11626 { 11627 struct amdgpu_device *adev = drm_to_adev(dev); 11628 struct dc *dc = adev->dm.dc; 11629 int i; 11630 11631 mutex_lock(&adev->dm.dc_lock); 11632 if (dc->current_state) { 11633 for (i = 0; i < dc->current_state->stream_count; ++i) 11634 dc->current_state->streams[i] 11635 ->triggered_crtc_reset.enabled = 11636 adev->dm.force_timing_sync; 11637 11638 
dm_enable_per_frame_crtc_master_sync(dc->current_state); 11639 dc_trigger_sync(dc, dc->current_state); 11640 } 11641 mutex_unlock(&adev->dm.dc_lock); 11642 } 11643 11644 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, 11645 uint32_t value, const char *func_name) 11646 { 11647 #ifdef DM_CHECK_ADDR_0 11648 if (address == 0) { 11649 DC_ERR("invalid register write. address = 0"); 11650 return; 11651 } 11652 #endif 11653 cgs_write_register(ctx->cgs_device, address, value); 11654 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value); 11655 } 11656 11657 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, 11658 const char *func_name) 11659 { 11660 uint32_t value; 11661 #ifdef DM_CHECK_ADDR_0 11662 if (address == 0) { 11663 DC_ERR("invalid register read; address = 0\n"); 11664 return 0; 11665 } 11666 #endif 11667 11668 if (ctx->dmub_srv && 11669 ctx->dmub_srv->reg_helper_offload.gather_in_progress && 11670 !ctx->dmub_srv->reg_helper_offload.should_burst_write) { 11671 ASSERT(false); 11672 return 0; 11673 } 11674 11675 value = cgs_read_register(ctx->cgs_device, address); 11676 11677 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); 11678 11679 return value; 11680 } 11681 11682 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, 11683 struct dc_context *ctx, 11684 uint8_t status_type, 11685 uint32_t *operation_result) 11686 { 11687 struct amdgpu_device *adev = ctx->driver_context; 11688 int return_status = -1; 11689 struct dmub_notification *p_notify = adev->dm.dmub_notify; 11690 11691 if (is_cmd_aux) { 11692 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) { 11693 return_status = p_notify->aux_reply.length; 11694 *operation_result = p_notify->result; 11695 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) { 11696 *operation_result = AUX_RET_ERROR_TIMEOUT; 11697 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) { 11698 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; 11699 } else { 11700 *operation_result = AUX_RET_ERROR_UNKNOWN; 11701 } 11702 } else { 11703 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) { 11704 return_status = 0; 11705 *operation_result = p_notify->sc_status; 11706 } else { 11707 *operation_result = SET_CONFIG_UNKNOWN_ERROR; 11708 } 11709 } 11710 11711 return return_status; 11712 } 11713 11714 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx, 11715 unsigned int link_index, void *cmd_payload, void *operation_result) 11716 { 11717 struct amdgpu_device *adev = ctx->driver_context; 11718 int ret = 0; 11719 11720 if (is_cmd_aux) { 11721 dc_process_dmub_aux_transfer_async(ctx->dc, 11722 link_index, (struct aux_payload *)cmd_payload); 11723 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index, 11724 (struct set_config_cmd_payload *)cmd_payload, 11725 adev->dm.dmub_notify)) { 11726 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, 11727 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS, 11728 (uint32_t *)operation_result); 11729 } 11730 11731 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ); 11732 if (ret == 0) { 11733 DRM_ERROR("wait_for_completion_timeout timeout!"); 11734 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, 11735 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT, 11736 (uint32_t *)operation_result); 11737 } 11738 11739 if (is_cmd_aux) { 11740 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) { 11741 struct aux_payload *payload = (struct aux_payload *)cmd_payload; 11742 11743 
payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; 11744 if (!payload->write && adev->dm.dmub_notify->aux_reply.length && 11745 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) { 11746 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data, 11747 adev->dm.dmub_notify->aux_reply.length); 11748 } 11749 } 11750 } 11751 11752 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, 11753 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS, 11754 (uint32_t *)operation_result); 11755 } 11756 11757 /* 11758 * Check whether seamless boot is supported. 11759 * 11760 * So far we only support seamless boot on CHIP_VANGOGH. 11761 * If everything goes well, we may consider expanding 11762 * seamless boot to other ASICs. 11763 */ 11764 bool check_seamless_boot_capability(struct amdgpu_device *adev) 11765 { 11766 switch (adev->asic_type) { 11767 case CHIP_VANGOGH: 11768 if (!adev->mman.keep_stolen_vga_memory) 11769 return true; 11770 break; 11771 default: 11772 break; 11773 } 11774 11775 return false; 11776 } 11777