/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_replay.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>
#include <linux/sort.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>

#include <acpi/video.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);

#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);

#define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    u32 link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc = NULL;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
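
/*
 * dm_crtc_get_scanoutpos - fetch the current scanout position for a CRTC
 *
 * Queries DC for the vblank start/end lines and the current horizontal and
 * vertical position, then packs them back into the two-register format the
 * base driver expects (low word | high word << 16).
 */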
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
	struct amdgpu_crtc *acrtc = NULL;
	struct dc *dc = adev->dm.dc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
		dc_allow_idle_optimizations(dc, false);

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
	return false;
}

static int dm_soft_reset(struct amdgpu_ip_block *ip_block)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
		return true;
	else
		return false;
}

/*
 * DC will program planes with their z-order determined by their ordering
 * in the dc_surface_updates array. This comparator is used to sort them
 * by descending zpos.
 */
static int dm_plane_layer_index_cmp(const void *a, const void *b)
{
	const struct dc_surface_update *sa = (struct dc_surface_update *)a;
	const struct dc_surface_update *sb = (struct dc_surface_update *)b;

	/* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
	return sb->surface->layer_index - sa->surface->layer_index;
}
/**
 * update_planes_and_stream_adapter() - Send planes to be updated in DC
 *
 * DC has a generic way to update planes and stream via
 * dc_update_planes_and_stream function; however, DM might need some
 * adjustments and preparation before calling it. This function is a wrapper
 * for the dc_update_planes_and_stream that does any required configuration
 * before passing control to DC.
 *
 * @dc: Display Core control structure
 * @update_type: specify whether it is FULL/MEDIUM/FAST update
 * @planes_count: planes count to update
 * @stream: stream state
 * @stream_update: stream update
 * @array_of_surface_update: dc surface update pointer
 *
 * Return: true if the planes and stream were updated successfully.
 */
static inline bool update_planes_and_stream_adapter(struct dc *dc,
						    int update_type,
						    int planes_count,
						    struct dc_stream_state *stream,
						    struct dc_stream_update *stream_update,
						    struct dc_surface_update *array_of_surface_update)
{
	sort(array_of_surface_update, planes_count,
	     sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);

	/*
	 * Previous frame finished and HW is ready for optimization.
	 */
	if (update_type == UPDATE_TYPE_FAST)
		dc_post_update_surfaces_to_stream(dc);

	return dc_update_planes_and_stream(dc,
					   array_of_surface_update,
					   planes_count,
					   stream,
					   stream_update);
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct drm_device *dev = adev_to_drm(adev);
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	u32 vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		drm_dbg_state(dev, "CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		drm_dbg_state(dev,
			      "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			      amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
			      amdgpu_crtc->crtc_id, amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_dbg_state(dev,
		      "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		      amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}
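
/*
 * dm_vupdate_high_irq - Handles the VUPDATE interrupt
 *
 * Tracks the measured per-frame refresh rate for tracing and, in VRR mode,
 * performs core vblank handling after the end of front-porch, including
 * below-the-range (BTR) processing on pre-DCE12 ASICs.
 */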
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = drm_crtc_vblank_crtc(&acrtc->base);
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		drm_dbg_vbl(drm_dev,
			    "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
			    vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * now that it is done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			amdgpu_dm_crtc_handle_vblank(acrtc);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct drm_writeback_job *job;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	if (acrtc->wb_conn) {
		spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);

		if (acrtc->wb_pending) {
			job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
						       struct drm_writeback_job,
						       list_entry);
			acrtc->wb_pending = false;
			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);

			if (job) {
				unsigned int v_total, refresh_hz;
				struct dc_stream_state *stream = acrtc->dm_irq_params.stream;

				v_total = stream->adjust.v_total_max ?
					  stream->adjust.v_total_max : stream->timing.v_total;
				refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
					     100LL, (v_total * stream->timing.h_total));
				mdelay(1000 / refresh_hz);

				drm_writeback_signal_completion(acrtc->wb_conn, 0);
				dc_stream_fc_disable_writeback(adev->dm.dc,
							       acrtc->dm_irq_params.stream, 0);
			}
		} else
			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
	}

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);

	drm_dbg_vbl(adev_to_drm(adev),
		    "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		    vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		amdgpu_dm_crtc_handle_vblank(acrtc);

	/*
	 * The following must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
	    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread and also signals the event to wake up the thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	u8 link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
	if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
		DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			if (notify->type == DMUB_NOTIFICATION_HPD)
				DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
			else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
				DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
			else
				DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
						notify->type, link_index);

			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD) {
			if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
				DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index);
			handle_hpd_irq_helper(hpd_aconnector);
		} else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
			handle_hpd_rx_irq(hpd_aconnector);
		}
	}
}

/**
 * dmub_hpd_sense_callback - DMUB HPD sense processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * HPD sense changes can occur during low power states and need to be
 * notified from firmware to driver.
 */
static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
				    struct dmub_notification *notify)
{
	DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n");
}
/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification. Also
 * sets the indicator whether callback processing is to be offloaded to the
 * dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false if the callback is NULL or
 * the notification type is invalid.
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
										      dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt event.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify = {0};
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	u32 count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	static const char *const event_type[] = {
		"NO_DATA",
		"AUX_REPLY",
		"HPD",
		"HPD_IRQ",
		"SET_CONFIG_REPLY",
		"DPIA_NOTIFICATION",
		"HPD_SENSE_NOTIFY",
	};

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_WARN("DMUB notification skipped due to no handler: type=%s\n",
					 event_type[notify.type]);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
								    GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				dmub_hpd_wrk->adev = adev;
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}
}
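
/* DM has no clock or power gating of its own to manage; these IP hooks are
 * no-ops.
 */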
static int dm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(struct amdgpu_ip_block *ip_block,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(struct amdgpu_ip_block *ip_block);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < (unsigned long) mode->htotal * mode->vtotal)
			max_size = (unsigned long) mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
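
/*
 * dm_dmub_hw_init - Initialize DMUB hardware and firmware state
 *
 * Resets any previously running DMCUB, copies the firmware instruction/data
 * sections and the VBIOS image into their framebuffer windows (unless the
 * PSP front-door load already placed them), programs the hardware
 * parameters, and waits for the firmware auto-load to finish.
 *
 * Returns 0 on success (or when DMUB is not supported on the ASIC),
 * negative errno on failure.
 */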
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dc_context *ctx = adev->dm.dc->ctx;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	u32 i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	/* initialize register offsets for ASICs with runtime initialization available */
	if (dmub_srv->hw_funcs.init_reg_offsets)
		dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	memset(fb_info->fb[DMUB_WINDOW_SHARED_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_SHARED_STATE].size);
	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->vm_manager.vram_base_offset;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 4):
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(4, 0, 1):
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 5, 1):
		hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	/* Keeping sanity checks off if
	 * DCN31 >= 4.0.59.0
	 * DCN314 >= 8.0.16.0
	 * Otherwise, turn on sanity checks
	 */
	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		if (adev->dm.dmcub_fw_version &&
		    adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
		    adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59))
			adev->dm.dc->debug.sanity_checks = true;
		break;
	case IP_VERSION(3, 1, 4):
		if (adev->dm.dmcub_fw_version &&
		    adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
		    adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16))
			adev->dm.dc->debug.sanity_checks = true;
		break;
	default:
		break;
	}

	return 0;
}
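
/*
 * dm_dmub_hw_resume - Restore DMUB hardware state on resume
 *
 * If the firmware is still initialized, only waits for the auto-load to
 * finish; otherwise re-runs the full dm_dmub_hw_init() sequence.
 */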
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;
	int r;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
	}
}
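
/*
 * mmhub_read_system_context - Build the DC physical address space config
 *
 * Derives the system aperture, AGP aperture and GART page table addresses
 * from the GMC/VM-manager state so DC can program the display address space
 * used for system memory (S/G) scanout.
 */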
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	u64 pt_base;
	u32 logical_addr_low;
	u32 logical_addr_high;
	u32 agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	/* AGP aperture is disabled */
	if (agp_bot > agp_top) {
		logical_addr_low = adev->gmc.fb_start >> 18;
		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
				       AMD_APU_IS_RENOIR |
				       AMD_APU_IS_GREEN_SARDINE))
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
			 * increase the system aperture high address (add 1) to get rid
			 * of the VM fault and hardware hang.
			 */
			logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
		else
			logical_addr_high = adev->gmc.fb_end >> 18;
	} else {
		logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
				       AMD_APU_IS_RENOIR |
				       AMD_APU_IS_GREEN_SARDINE))
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
			 * increase the system aperture high address (add 1) to get rid
			 * of the VM fault and hardware hang.
			 */
			logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
		else
			logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
	}

	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
						   AMDGPU_GPU_PAGE_SHIFT);
	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
						  AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
						 AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
						AMDGPU_GPU_PAGE_SHIFT);
	page_table_base.high_part = upper_32_bits(pt_base);
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
}

static void force_connector_state(
	struct amdgpu_dm_connector *aconnector,
	enum drm_connector_force force_state)
{
	struct drm_connector *connector = &aconnector->base;

	mutex_lock(&connector->dev->mode_config.mutex);
	aconnector->base.force = force_state;
	mutex_unlock(&connector->dev->mode_config.mutex);

	mutex_lock(&aconnector->hpd_lock);
	drm_kms_helper_connector_hotplug_event(connector);
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;
	union test_response test_response;

	memset(&test_response, 0, sizeof(test_response));

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
	    offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
		goto skip;
	}

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		dc_link_dp_handle_automated_test(dc_link);

		if (aconnector->timing_changed) {
			/* force connector disconnect and reconnect */
			force_connector_state(aconnector, DRM_FORCE_OFF);
			msleep(100);
			force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
		}

		test_response.bits.ACK = 1;

		core_link_write_dpcd(
			dc_link,
			DP_TEST_RESPONSE,
			&test_response.raw,
			sizeof(test_response));
	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
		   dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
		   dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		/*
		 * offload_work->data comes from handle_hpd_rx_irq->
		 * schedule_hpd_rx_offload_work; this is the deferred handling of
		 * an hpd short pulse. By now the link status may have changed,
		 * so read the latest link status from the DPCD registers. If the
		 * link status is good, skip re-running link training.
		 */
		union hpd_irq_data irq_data;

		memset(&irq_data, 0, sizeof(irq_data));

		/*
		 * Before dc_link_dp_handle_link_loss, allow a new link-loss
		 * handling request to be added to the work queue, in case link
		 * is lost again at the end of dc_link_dp_handle_link_loss.
		 */
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);

		if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
		    dc_link_check_link_loss_status(dc_link, &irq_data))
			dc_link_dp_handle_link_loss(dc_link);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	for (i = 0; i < max_caps; i++) {
		if (hpd_rx_offload_wq[i].wq)
			destroy_workqueue(hpd_rx_offload_wq[i].wq);
	}
	kfree(hpd_rx_offload_wq);
	return NULL;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
		},
	},
	{}
	/* TODO: refactor this from a fixed table to a dynamic option */
};

static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
{
	const struct dmi_system_id *dmi_id;

	dm->aux_hpd_discon_quirk = false;

	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
	if (dmi_id) {
		dm->aux_hpd_discon_quirk = true;
		DRM_INFO("aux_hpd_discon_quirk attached\n");
	}
}

void*
dm_allocate_gpu_mem(
		struct amdgpu_device *adev,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	struct dal_allocation *da;
	u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
		AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
	int ret;

	da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
	if (!da)
		return NULL;

	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				      domain, &da->bo,
				      &da->gpu_addr, &da->cpu_ptr);

	*addr = da->gpu_addr;

	if (ret) {
		kfree(da);
		return NULL;
	}

	/* add da to list in dm */
	list_add(&da->list, &adev->dm.da_list);

	return da->cpu_ptr;
}

void
dm_free_gpu_mem(
		struct amdgpu_device *adev,
		enum dc_gpu_mem_alloc_type type,
		void *pvMem)
{
	struct dal_allocation *da;

	/* walk the da list in DM */
	list_for_each_entry(da, &adev->dm.da_list, list) {
		if (pvMem == da->cpu_ptr) {
			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
			list_del(&da->list);
			kfree(da);
			break;
		}
	}
}
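
/*
 * dm_dmub_send_vbios_gpint_command - Send a GPINT command to the VBIOS DMUB
 *
 * Writes the command and parameter into the GPINT data register, then polls
 * in 1 us steps until the firmware acks by clearing the status bit or until
 * timeout_us has elapsed.
 */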
static enum dmub_status
dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
				 enum dmub_gpint_command command_code,
				 uint16_t param,
				 uint32_t timeout_us)
{
	union dmub_gpint_data_register reg, test;
	uint32_t i;

	/* Assume that VBIOS DMUB is ready to take commands */

	reg.bits.status = 1;
	reg.bits.command_code = command_code;
	reg.bits.param = param;

	cgs_write_register(adev->dm.cgs_device, 0x34c0 + 0x01f8, reg.all);

	for (i = 0; i < timeout_us; ++i) {
		udelay(1);

		/* Check if our GPINT got acked */
		reg.bits.status = 0;
		test = (union dmub_gpint_data_register)
			cgs_read_register(adev->dm.cgs_device, 0x34c0 + 0x01f8);

		if (test.all == reg.all)
			return DMUB_STATUS_OK;
	}

	return DMUB_STATUS_TIMEOUT;
}
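
/*
 * dm_dmub_get_vbios_bounding_box - Fetch the SoC bounding box from VBIOS
 *
 * Allocates a GART buffer for the DML2 SoC bounding box, hands its address
 * to the VBIOS-owned DMUB in four 16-bit chunks and then asks the firmware
 * to copy the bounding box into it. Only applicable to DCN 4.0.1.
 */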
1799 if (!bb) 1800 return NULL; 1801 1802 for (i = 0; i < 4; i++) { 1803 /* Extract 16-bit chunk */ 1804 chunk = ((uint64_t) addr >> (i * 16)) & 0xFFFF; 1805 /* Send the chunk */ 1806 ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000); 1807 if (ret != DMUB_STATUS_OK) 1808 goto free_bb; 1809 } 1810 1811 /* Now ask DMUB to copy the bb */ 1812 ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000); 1813 if (ret != DMUB_STATUS_OK) 1814 goto free_bb; 1815 1816 return bb; 1817 1818 free_bb: 1819 dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb); 1820 return NULL; 1821 1822 } 1823 1824 static enum dmub_ips_disable_type dm_get_default_ips_mode( 1825 struct amdgpu_device *adev) 1826 { 1827 enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE; 1828 1829 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1830 case IP_VERSION(3, 5, 0): 1831 /* 1832 * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to 1833 * cause a hard hang. A fix exists for newer PMFW. 1834 * 1835 * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest 1836 * IPS state in all cases, except for s0ix and all displays off (DPMS), 1837 * where IPS2 is allowed. 1838 * 1839 * When checking pmfw version, use the major and minor only. 1840 */ 1841 if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300) 1842 ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; 1843 else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0)) 1844 /* 1845 * Other ASICs with DCN35 that have residency issues with 1846 * IPS2 in idle. 1847 * We want them to use IPS2 only in display off cases. 1848 */ 1849 ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; 1850 break; 1851 case IP_VERSION(3, 5, 1): 1852 ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; 1853 break; 1854 default: 1855 /* ASICs older than DCN35 do not have IPSs */ 1856 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0)) 1857 ret = DMUB_IPS_DISABLE_ALL; 1858 break; 1859 } 1860 1861 return ret; 1862 } 1863 1864 static int amdgpu_dm_init(struct amdgpu_device *adev) 1865 { 1866 struct dc_init_data init_data; 1867 struct dc_callback_init init_params; 1868 int r; 1869 1870 adev->dm.ddev = adev_to_drm(adev); 1871 adev->dm.adev = adev; 1872 1873 /* Zero all the fields */ 1874 memset(&init_data, 0, sizeof(init_data)); 1875 memset(&init_params, 0, sizeof(init_params)); 1876 1877 mutex_init(&adev->dm.dpia_aux_lock); 1878 mutex_init(&adev->dm.dc_lock); 1879 mutex_init(&adev->dm.audio_lock); 1880 1881 if (amdgpu_dm_irq_init(adev)) { 1882 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); 1883 goto error; 1884 } 1885 1886 init_data.asic_id.chip_family = adev->family; 1887 1888 init_data.asic_id.pci_revision_id = adev->pdev->revision; 1889 init_data.asic_id.hw_internal_rev = adev->external_rev_id; 1890 init_data.asic_id.chip_id = adev->pdev->device; 1891 1892 init_data.asic_id.vram_width = adev->gmc.vram_width; 1893 /* TODO: initialize init_data.asic_id.vram_type here!!!! 
*/ 1894 init_data.asic_id.atombios_base_address = 1895 adev->mode_info.atom_context->bios; 1896 1897 init_data.driver = adev; 1898 1899 /* cgs_device was created in dm_sw_init() */ 1900 init_data.cgs_device = adev->dm.cgs_device; 1901 1902 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; 1903 1904 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1905 case IP_VERSION(2, 1, 0): 1906 switch (adev->dm.dmcub_fw_version) { 1907 case 0: /* development */ 1908 case 0x1: /* linux-firmware.git hash 6d9f399 */ 1909 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ 1910 init_data.flags.disable_dmcu = false; 1911 break; 1912 default: 1913 init_data.flags.disable_dmcu = true; 1914 } 1915 break; 1916 case IP_VERSION(2, 0, 3): 1917 init_data.flags.disable_dmcu = true; 1918 break; 1919 default: 1920 break; 1921 } 1922 1923 /* APUs support S/G display by default except: 1924 * ASICs before Carrizo, 1925 * RAVEN1 (users reported stability issues) 1926 */ 1927 1928 if (adev->asic_type < CHIP_CARRIZO) { 1929 init_data.flags.gpu_vm_support = false; 1930 } else if (adev->asic_type == CHIP_RAVEN) { 1931 if (adev->apu_flags & AMD_APU_IS_RAVEN) 1932 init_data.flags.gpu_vm_support = false; 1933 else 1934 init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0); 1935 } else { 1936 if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3)) 1937 init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1); 1938 else 1939 init_data.flags.gpu_vm_support = 1940 (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); 1941 } 1942 1943 adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support; 1944 1945 if (amdgpu_dc_feature_mask & DC_FBC_MASK) 1946 init_data.flags.fbc_support = true; 1947 1948 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK) 1949 init_data.flags.multi_mon_pp_mclk_switch = true; 1950 1951 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK) 1952 init_data.flags.disable_fractional_pwm = true; 1953 1954 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING) 1955 init_data.flags.edp_no_power_sequencing = true; 1956 1957 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A) 1958 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true; 1959 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0) 1960 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; 1961 1962 init_data.flags.seamless_boot_edp_requested = false; 1963 1964 if (amdgpu_device_seamless_boot_supported(adev)) { 1965 init_data.flags.seamless_boot_edp_requested = true; 1966 init_data.flags.allow_seamless_boot_optimization = true; 1967 DRM_INFO("Seamless boot condition check passed\n"); 1968 } 1969 1970 init_data.flags.enable_mipi_converter_optimization = true; 1971 1972 init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0]; 1973 init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0]; 1974 init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0]; 1975 1976 if (amdgpu_dc_debug_mask & DC_DISABLE_IPS) 1977 init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL; 1978 else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC) 1979 init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC; 1980 else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC) 1981 init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; 1982 else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE) 1983 init_data.flags.disable_ips = DMUB_IPS_ENABLE; 1984 else 1985 init_data.flags.disable_ips = dm_get_default_ips_mode(adev); 1986 1987 init_data.flags.disable_ips_in_vpb = 0; 1988 1989 /* Enable DWB for tested platforms
only */ 1990 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) 1991 init_data.num_virtual_links = 1; 1992 1993 retrieve_dmi_info(&adev->dm); 1994 1995 if (adev->dm.bb_from_dmub) 1996 init_data.bb_from_dmub = adev->dm.bb_from_dmub; 1997 else 1998 init_data.bb_from_dmub = NULL; 1999 2000 /* Display Core create. */ 2001 adev->dm.dc = dc_create(&init_data); 2002 2003 if (adev->dm.dc) { 2004 DRM_INFO("Display Core v%s initialized on %s\n", DC_VER, 2005 dce_version_to_string(adev->dm.dc->ctx->dce_version)); 2006 } else { 2007 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER); 2008 goto error; 2009 } 2010 2011 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) { 2012 adev->dm.dc->debug.force_single_disp_pipe_split = false; 2013 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; 2014 } 2015 2016 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) 2017 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE); 2018 if (dm_should_disable_stutter(adev->pdev)) 2019 adev->dm.dc->debug.disable_stutter = true; 2020 2021 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) 2022 adev->dm.dc->debug.disable_stutter = true; 2023 2024 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) 2025 adev->dm.dc->debug.disable_dsc = true; 2026 2027 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) 2028 adev->dm.dc->debug.disable_clock_gate = true; 2029 2030 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH) 2031 adev->dm.dc->debug.force_subvp_mclk_switch = true; 2032 2033 if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) { 2034 adev->dm.dc->debug.using_dml2 = true; 2035 adev->dm.dc->debug.using_dml21 = true; 2036 } 2037 2038 adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; 2039 2040 /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ 2041 adev->dm.dc->debug.ignore_cable_id = true; 2042 2043 if (adev->dm.dc->caps.dp_hdmi21_pcon_support) 2044 DRM_INFO("DP-HDMI FRL PCON supported\n"); 2045 2046 r = dm_dmub_hw_init(adev); 2047 if (r) { 2048 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 2049 goto error; 2050 } 2051 2052 dc_hardware_init(adev->dm.dc); 2053 2054 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); 2055 if (!adev->dm.hpd_rx_offload_wq) { 2056 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); 2057 goto error; 2058 } 2059 2060 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { 2061 struct dc_phy_addr_space_config pa_config; 2062 2063 mmhub_read_system_context(adev, &pa_config); 2064 2065 /* Call the DC init_memory func */ 2066 dc_setup_system_context(adev->dm.dc, &pa_config); 2067 } 2068 2069 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); 2070 if (!adev->dm.freesync_module) { 2071 DRM_ERROR( 2072 "amdgpu: failed to initialize freesync_module.\n"); 2073 } else 2074 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", 2075 adev->dm.freesync_module); 2076 2077 amdgpu_dm_init_color_mod(); 2078 2079 if (adev->dm.dc->caps.max_links > 0) { 2080 adev->dm.vblank_control_workqueue = 2081 create_singlethread_workqueue("dm_vblank_control_workqueue"); 2082 if (!adev->dm.vblank_control_workqueue) 2083 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); 2084 } 2085 2086 if (adev->dm.dc->caps.ips_support && 2087 adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) 2088 adev->dm.idle_workqueue = idle_create_workqueue(adev); 2089 2090 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { 2091
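/* HDCP requires at least one DC link and is only wired up on Raven (family RV) and newer ASICs. */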
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); 2092 2093 if (!adev->dm.hdcp_workqueue) 2094 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); 2095 else 2096 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); 2097 2098 dc_init_callbacks(adev->dm.dc, &init_params); 2099 } 2100 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 2101 init_completion(&adev->dm.dmub_aux_transfer_done); 2102 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); 2103 if (!adev->dm.dmub_notify) { 2104 DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n"); 2105 goto error; 2106 } 2107 2108 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); 2109 if (!adev->dm.delayed_hpd_wq) { 2110 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); 2111 goto error; 2112 } 2113 2114 amdgpu_dm_outbox_init(adev); 2115 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, 2116 dmub_aux_setconfig_callback, false)) { 2117 DRM_ERROR("amdgpu: failed to register dmub aux callback\n"); 2118 goto error; 2119 } 2120 /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. 2121 * It is expected that DMUB will resend any pending notifications at this point. Note 2122 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to 2123 * align with the legacy interface initialization sequence. Connection status will be proactively 2124 * detected once in amdgpu_dm_initialize_drm_device. 2125 */ 2126 dc_enable_dmub_outbox(adev->dm.dc); 2127 2128 /* DPIA trace goes to dmesg logs only if outbox is enabled */ 2129 if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE) 2130 dc_dmub_srv_enable_dpia_trace(adev->dm.dc); 2131 } 2132 2133 if (amdgpu_dm_initialize_drm_device(adev)) { 2134 DRM_ERROR( 2135 "amdgpu: failed to initialize sw for display support.\n"); 2136 goto error; 2137 } 2138 2139 /* create fake encoders for MST */ 2140 dm_dp_create_fake_mst_encoders(adev); 2141 2142 /* TODO: Add_display_info?
*/ 2143 2144 /* TODO use dynamic cursor width */ 2145 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; 2146 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; 2147 2148 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { 2149 DRM_ERROR( 2150 "amdgpu: failed to initialize vblank support.\n"); 2151 goto error; 2152 } 2153 2154 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 2155 amdgpu_dm_crtc_secure_display_create_contexts(adev); 2156 if (!adev->dm.secure_display_ctx.crtc_ctx) 2157 DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n"); 2158 2159 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1)) 2160 adev->dm.secure_display_ctx.support_mul_roi = true; 2161 2162 #endif 2163 2164 DRM_DEBUG_DRIVER("KMS initialized.\n"); 2165 2166 return 0; 2167 error: 2168 amdgpu_dm_fini(adev); 2169 2170 return -EINVAL; 2171 } 2172 2173 static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block) 2174 { 2175 struct amdgpu_device *adev = ip_block->adev; 2176 2177 amdgpu_dm_audio_fini(adev); 2178 2179 return 0; 2180 } 2181 2182 static void amdgpu_dm_fini(struct amdgpu_device *adev) 2183 { 2184 int i; 2185 2186 if (adev->dm.vblank_control_workqueue) { 2187 destroy_workqueue(adev->dm.vblank_control_workqueue); 2188 adev->dm.vblank_control_workqueue = NULL; 2189 } 2190 2191 if (adev->dm.idle_workqueue) { 2192 if (adev->dm.idle_workqueue->running) { 2193 adev->dm.idle_workqueue->enable = false; 2194 flush_work(&adev->dm.idle_workqueue->work); 2195 } 2196 2197 kfree(adev->dm.idle_workqueue); 2198 adev->dm.idle_workqueue = NULL; 2199 } 2200 2201 amdgpu_dm_destroy_drm_device(&adev->dm); 2202 2203 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 2204 if (adev->dm.secure_display_ctx.crtc_ctx) { 2205 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2206 if (adev->dm.secure_display_ctx.crtc_ctx[i].crtc) { 2207 flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].notify_ta_work); 2208 flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].forward_roi_work); 2209 } 2210 } 2211 kfree(adev->dm.secure_display_ctx.crtc_ctx); 2212 adev->dm.secure_display_ctx.crtc_ctx = NULL; 2213 } 2214 #endif 2215 if (adev->dm.hdcp_workqueue) { 2216 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue); 2217 adev->dm.hdcp_workqueue = NULL; 2218 } 2219 2220 if (adev->dm.dc) { 2221 dc_deinit_callbacks(adev->dm.dc); 2222 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); 2223 if (dc_enable_dmub_notifications(adev->dm.dc)) { 2224 kfree(adev->dm.dmub_notify); 2225 adev->dm.dmub_notify = NULL; 2226 destroy_workqueue(adev->dm.delayed_hpd_wq); 2227 adev->dm.delayed_hpd_wq = NULL; 2228 } 2229 } 2230 2231 if (adev->dm.dmub_bo) 2232 amdgpu_bo_free_kernel(&adev->dm.dmub_bo, 2233 &adev->dm.dmub_bo_gpu_addr, 2234 &adev->dm.dmub_bo_cpu_addr); 2235 2236 if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) { 2237 for (i = 0; i < adev->dm.dc->caps.max_links; i++) { 2238 if (adev->dm.hpd_rx_offload_wq[i].wq) { 2239 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); 2240 adev->dm.hpd_rx_offload_wq[i].wq = NULL; 2241 } 2242 } 2243 2244 kfree(adev->dm.hpd_rx_offload_wq); 2245 adev->dm.hpd_rx_offload_wq = NULL; 2246 } 2247 2248 /* DC Destroy TODO: Replace destroy DAL */ 2249 if (adev->dm.dc) 2250 dc_destroy(&adev->dm.dc); 2251 /* 2252 * TODO: pageflip, vblank interrupt 2253 * 2254 * amdgpu_dm_irq_fini(adev); 2255 */ 2256 2257 if (adev->dm.cgs_device) { 2258 amdgpu_cgs_destroy_device(adev->dm.cgs_device); 2259 adev->dm.cgs_device = NULL; 2260 } 2261 if
(adev->dm.freesync_module) { 2262 mod_freesync_destroy(adev->dm.freesync_module); 2263 adev->dm.freesync_module = NULL; 2264 } 2265 2266 mutex_destroy(&adev->dm.audio_lock); 2267 mutex_destroy(&adev->dm.dc_lock); 2268 mutex_destroy(&adev->dm.dpia_aux_lock); 2269 } 2270 2271 static int load_dmcu_fw(struct amdgpu_device *adev) 2272 { 2273 const char *fw_name_dmcu = NULL; 2274 int r; 2275 const struct dmcu_firmware_header_v1_0 *hdr; 2276 2277 switch (adev->asic_type) { 2278 #if defined(CONFIG_DRM_AMD_DC_SI) 2279 case CHIP_TAHITI: 2280 case CHIP_PITCAIRN: 2281 case CHIP_VERDE: 2282 case CHIP_OLAND: 2283 #endif 2284 case CHIP_BONAIRE: 2285 case CHIP_HAWAII: 2286 case CHIP_KAVERI: 2287 case CHIP_KABINI: 2288 case CHIP_MULLINS: 2289 case CHIP_TONGA: 2290 case CHIP_FIJI: 2291 case CHIP_CARRIZO: 2292 case CHIP_STONEY: 2293 case CHIP_POLARIS11: 2294 case CHIP_POLARIS10: 2295 case CHIP_POLARIS12: 2296 case CHIP_VEGAM: 2297 case CHIP_VEGA10: 2298 case CHIP_VEGA12: 2299 case CHIP_VEGA20: 2300 return 0; 2301 case CHIP_NAVI12: 2302 fw_name_dmcu = FIRMWARE_NAVI12_DMCU; 2303 break; 2304 case CHIP_RAVEN: 2305 if (ASICREV_IS_PICASSO(adev->external_rev_id)) 2306 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 2307 else if (ASICREV_IS_RAVEN2(adev->external_rev_id)) 2308 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 2309 else 2310 return 0; 2311 break; 2312 default: 2313 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2314 case IP_VERSION(2, 0, 2): 2315 case IP_VERSION(2, 0, 3): 2316 case IP_VERSION(2, 0, 0): 2317 case IP_VERSION(2, 1, 0): 2318 case IP_VERSION(3, 0, 0): 2319 case IP_VERSION(3, 0, 2): 2320 case IP_VERSION(3, 0, 3): 2321 case IP_VERSION(3, 0, 1): 2322 case IP_VERSION(3, 1, 2): 2323 case IP_VERSION(3, 1, 3): 2324 case IP_VERSION(3, 1, 4): 2325 case IP_VERSION(3, 1, 5): 2326 case IP_VERSION(3, 1, 6): 2327 case IP_VERSION(3, 2, 0): 2328 case IP_VERSION(3, 2, 1): 2329 case IP_VERSION(3, 5, 0): 2330 case IP_VERSION(3, 5, 1): 2331 case IP_VERSION(4, 0, 1): 2332 return 0; 2333 default: 2334 break; 2335 } 2336 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 2337 return -EINVAL; 2338 } 2339 2340 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 2341 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n"); 2342 return 0; 2343 } 2344 2345 r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, AMDGPU_UCODE_REQUIRED, 2346 "%s", fw_name_dmcu); 2347 if (r == -ENODEV) { 2348 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ 2349 DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); 2350 adev->dm.fw_dmcu = NULL; 2351 return 0; 2352 } 2353 if (r) { 2354 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", 2355 fw_name_dmcu); 2356 amdgpu_ucode_release(&adev->dm.fw_dmcu); 2357 return r; 2358 } 2359 2360 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; 2361 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; 2362 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; 2363 adev->firmware.fw_size += 2364 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 2365 2366 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; 2367 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; 2368 adev->firmware.fw_size += 2369 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 2370 2371 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version); 2372 2373 DRM_DEBUG_KMS("PSP loading DMCU firmware\n"); 2374 2375 return 0; 
2376 } 2377 2378 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) 2379 { 2380 struct amdgpu_device *adev = ctx; 2381 2382 return dm_read_reg(adev->dm.dc->ctx, address); 2383 } 2384 2385 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, 2386 uint32_t value) 2387 { 2388 struct amdgpu_device *adev = ctx; 2389 2390 return dm_write_reg(adev->dm.dc->ctx, address, value); 2391 } 2392 2393 static int dm_dmub_sw_init(struct amdgpu_device *adev) 2394 { 2395 struct dmub_srv_create_params create_params; 2396 struct dmub_srv_region_params region_params; 2397 struct dmub_srv_region_info region_info; 2398 struct dmub_srv_memory_params memory_params; 2399 struct dmub_srv_fb_info *fb_info; 2400 struct dmub_srv *dmub_srv; 2401 const struct dmcub_firmware_header_v1_0 *hdr; 2402 enum dmub_asic dmub_asic; 2403 enum dmub_status status; 2404 static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = { 2405 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST 2406 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK 2407 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA 2408 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS 2409 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX 2410 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF 2411 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE 2412 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM 2413 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE 2414 }; 2415 int r; 2416 2417 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2418 case IP_VERSION(2, 1, 0): 2419 dmub_asic = DMUB_ASIC_DCN21; 2420 break; 2421 case IP_VERSION(3, 0, 0): 2422 dmub_asic = DMUB_ASIC_DCN30; 2423 break; 2424 case IP_VERSION(3, 0, 1): 2425 dmub_asic = DMUB_ASIC_DCN301; 2426 break; 2427 case IP_VERSION(3, 0, 2): 2428 dmub_asic = DMUB_ASIC_DCN302; 2429 break; 2430 case IP_VERSION(3, 0, 3): 2431 dmub_asic = DMUB_ASIC_DCN303; 2432 break; 2433 case IP_VERSION(3, 1, 2): 2434 case IP_VERSION(3, 1, 3): 2435 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; 2436 break; 2437 case IP_VERSION(3, 1, 4): 2438 dmub_asic = DMUB_ASIC_DCN314; 2439 break; 2440 case IP_VERSION(3, 1, 5): 2441 dmub_asic = DMUB_ASIC_DCN315; 2442 break; 2443 case IP_VERSION(3, 1, 6): 2444 dmub_asic = DMUB_ASIC_DCN316; 2445 break; 2446 case IP_VERSION(3, 2, 0): 2447 dmub_asic = DMUB_ASIC_DCN32; 2448 break; 2449 case IP_VERSION(3, 2, 1): 2450 dmub_asic = DMUB_ASIC_DCN321; 2451 break; 2452 case IP_VERSION(3, 5, 0): 2453 case IP_VERSION(3, 5, 1): 2454 dmub_asic = DMUB_ASIC_DCN35; 2455 break; 2456 case IP_VERSION(4, 0, 1): 2457 dmub_asic = DMUB_ASIC_DCN401; 2458 break; 2459 2460 default: 2461 /* ASIC doesn't support DMUB. 
*/ 2462 return 0; 2463 } 2464 2465 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; 2466 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); 2467 2468 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 2469 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = 2470 AMDGPU_UCODE_ID_DMCUB; 2471 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = 2472 adev->dm.dmub_fw; 2473 adev->firmware.fw_size += 2474 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); 2475 2476 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", 2477 adev->dm.dmcub_fw_version); 2478 } 2479 2480 2481 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); 2482 dmub_srv = adev->dm.dmub_srv; 2483 2484 if (!dmub_srv) { 2485 DRM_ERROR("Failed to allocate DMUB service!\n"); 2486 return -ENOMEM; 2487 } 2488 2489 memset(&create_params, 0, sizeof(create_params)); 2490 create_params.user_ctx = adev; 2491 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; 2492 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; 2493 create_params.asic = dmub_asic; 2494 2495 /* Create the DMUB service. */ 2496 status = dmub_srv_create(dmub_srv, &create_params); 2497 if (status != DMUB_STATUS_OK) { 2498 DRM_ERROR("Error creating DMUB service: %d\n", status); 2499 return -EINVAL; 2500 } 2501 2502 /* Calculate the size of all the regions for the DMUB service. */ 2503 memset(&region_params, 0, sizeof(region_params)); 2504 2505 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - 2506 PSP_HEADER_BYTES - PSP_FOOTER_BYTES; 2507 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); 2508 region_params.vbios_size = adev->bios_size; 2509 region_params.fw_bss_data = region_params.bss_data_size ? 2510 adev->dm.dmub_fw->data + 2511 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2512 le32_to_cpu(hdr->inst_const_bytes) : NULL; 2513 region_params.fw_inst_const = 2514 adev->dm.dmub_fw->data + 2515 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2516 PSP_HEADER_BYTES; 2517 region_params.window_memory_type = window_memory_type; 2518 2519 status = dmub_srv_calc_region_info(dmub_srv, &region_params, 2520 &region_info); 2521 2522 if (status != DMUB_STATUS_OK) { 2523 DRM_ERROR("Error calculating DMUB region info: %d\n", status); 2524 return -EINVAL; 2525 } 2526 2527 /* 2528 * Allocate a framebuffer based on the total size of all the regions. 2529 * TODO: Move this into GART. 2530 */ 2531 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, 2532 AMDGPU_GEM_DOMAIN_VRAM | 2533 AMDGPU_GEM_DOMAIN_GTT, 2534 &adev->dm.dmub_bo, 2535 &adev->dm.dmub_bo_gpu_addr, 2536 &adev->dm.dmub_bo_cpu_addr); 2537 if (r) 2538 return r; 2539 2540 /* Rebase the regions on the framebuffer address.
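* Each DMUB window is assigned CPU and GPU addresses carved out of the single BO allocated above.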
*/ 2541 memset(&memory_params, 0, sizeof(memory_params)); 2542 memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr; 2543 memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr; 2544 memory_params.region_info = &region_info; 2545 memory_params.window_memory_type = window_memory_type; 2546 2547 adev->dm.dmub_fb_info = 2548 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); 2549 fb_info = adev->dm.dmub_fb_info; 2550 2551 if (!fb_info) { 2552 DRM_ERROR( 2553 "Failed to allocate framebuffer info for DMUB service!\n"); 2554 return -ENOMEM; 2555 } 2556 2557 status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info); 2558 if (status != DMUB_STATUS_OK) { 2559 DRM_ERROR("Error calculating DMUB FB info: %d\n", status); 2560 return -EINVAL; 2561 } 2562 2563 adev->dm.bb_from_dmub = dm_dmub_get_vbios_bounding_box(adev); 2564 2565 return 0; 2566 } 2567 2568 static int dm_sw_init(struct amdgpu_ip_block *ip_block) 2569 { 2570 struct amdgpu_device *adev = ip_block->adev; 2571 int r; 2572 2573 adev->dm.cgs_device = amdgpu_cgs_create_device(adev); 2574 2575 if (!adev->dm.cgs_device) { 2576 DRM_ERROR("amdgpu: failed to create cgs device.\n"); 2577 return -EINVAL; 2578 } 2579 2580 /* Moved from dm init since we need to use allocations for storing bounding box data */ 2581 INIT_LIST_HEAD(&adev->dm.da_list); 2582 2583 r = dm_dmub_sw_init(adev); 2584 if (r) 2585 return r; 2586 2587 return load_dmcu_fw(adev); 2588 } 2589 2590 static int dm_sw_fini(struct amdgpu_ip_block *ip_block) 2591 { 2592 struct amdgpu_device *adev = ip_block->adev; 2593 struct dal_allocation *da; 2594 2595 list_for_each_entry(da, &adev->dm.da_list, list) { 2596 if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) { 2597 amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); 2598 list_del(&da->list); 2599 kfree(da); 2600 adev->dm.bb_from_dmub = NULL; 2601 break; 2602 } 2603 } 2604 2605 2606 kfree(adev->dm.dmub_fb_info); 2607 adev->dm.dmub_fb_info = NULL; 2608 2609 if (adev->dm.dmub_srv) { 2610 dmub_srv_destroy(adev->dm.dmub_srv); 2611 kfree(adev->dm.dmub_srv); 2612 adev->dm.dmub_srv = NULL; 2613 } 2614 2615 amdgpu_ucode_release(&adev->dm.dmub_fw); 2616 amdgpu_ucode_release(&adev->dm.fw_dmcu); 2617 2618 return 0; 2619 } 2620 2621 static int detect_mst_link_for_all_connectors(struct drm_device *dev) 2622 { 2623 struct amdgpu_dm_connector *aconnector; 2624 struct drm_connector *connector; 2625 struct drm_connector_list_iter iter; 2626 int ret = 0; 2627 2628 drm_connector_list_iter_begin(dev, &iter); 2629 drm_for_each_connector_iter(connector, &iter) { 2630 2631 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 2632 continue; 2633 2634 aconnector = to_amdgpu_dm_connector(connector); 2635 if (aconnector->dc_link->type == dc_connection_mst_branch && 2636 aconnector->mst_mgr.aux) { 2637 drm_dbg_kms(dev, "DM_MST: starting TM on aconnector: %p [id: %d]\n", 2638 aconnector, 2639 aconnector->base.base.id); 2640 2641 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); 2642 if (ret < 0) { 2643 drm_err(dev, "DM_MST: Failed to start MST\n"); 2644 aconnector->dc_link->type = 2645 dc_connection_single; 2646 ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, 2647 aconnector->dc_link); 2648 break; 2649 } 2650 } 2651 } 2652 drm_connector_list_iter_end(&iter); 2653 2654 return ret; 2655 } 2656 2657 static int dm_late_init(struct amdgpu_ip_block *ip_block) 2658 { 2659 struct amdgpu_device *adev = ip_block->adev; 2660 2661 struct dmcu_iram_parameters params; 2662 unsigned int linear_lut[16]; 2663 int i; 2664 struct
dmcu *dmcu = NULL; 2665 2666 dmcu = adev->dm.dc->res_pool->dmcu; 2667 2668 for (i = 0; i < 16; i++) 2669 linear_lut[i] = 0xFFFF * i / 15; 2670 2671 params.set = 0; 2672 params.backlight_ramping_override = false; 2673 params.backlight_ramping_start = 0xCCCC; 2674 params.backlight_ramping_reduction = 0xCCCCCCCC; 2675 params.backlight_lut_array_size = 16; 2676 params.backlight_lut_array = linear_lut; 2677 2678 /* Min backlight level after ABM reduction; don't allow below 1% 2679 * 0xFFFF x 0.01 = 0x28F 2680 */ 2681 params.min_abm_backlight = 0x28F; 2682 /* In the case where abm is implemented on dmcub, 2683 * dmcu object will be null. 2684 * ABM 2.4 and up are implemented on dmcub. 2685 */ 2686 if (dmcu) { 2687 if (!dmcu_load_iram(dmcu, params)) 2688 return -EINVAL; 2689 } else if (adev->dm.dc->ctx->dmub_srv) { 2690 struct dc_link *edp_links[MAX_NUM_EDP]; 2691 int edp_num; 2692 2693 dc_get_edp_links(adev->dm.dc, edp_links, &edp_num); 2694 for (i = 0; i < edp_num; i++) { 2695 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i)) 2696 return -EINVAL; 2697 } 2698 } 2699 2700 return detect_mst_link_for_all_connectors(adev_to_drm(adev)); 2701 } 2702 2703 static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr) 2704 { 2705 u8 buf[UUID_SIZE]; 2706 guid_t guid; 2707 int ret; 2708 2709 mutex_lock(&mgr->lock); 2710 if (!mgr->mst_primary) 2711 goto out_fail; 2712 2713 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { 2714 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2715 goto out_fail; 2716 } 2717 2718 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2719 DP_MST_EN | 2720 DP_UP_REQ_EN | 2721 DP_UPSTREAM_IS_SRC); 2722 if (ret < 0) { 2723 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); 2724 goto out_fail; 2725 } 2726 2727 /* Some hubs forget their GUIDs after they resume */ 2728 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf)); 2729 if (ret != sizeof(buf)) { 2730 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2731 goto out_fail; 2732 } 2733 2734 import_guid(&guid, buf); 2735 2736 if (guid_is_null(&guid)) { 2737 guid_gen(&guid); 2738 export_guid(buf, &guid); 2739 2740 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, buf, sizeof(buf)); 2741 2742 if (ret != sizeof(buf)) { 2743 drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n"); 2744 goto out_fail; 2745 } 2746 } 2747 2748 guid_copy(&mgr->mst_primary->guid, &guid); 2749 2750 out_fail: 2751 mutex_unlock(&mgr->lock); 2752 } 2753 2754 static void s3_handle_mst(struct drm_device *dev, bool suspend) 2755 { 2756 struct amdgpu_dm_connector *aconnector; 2757 struct drm_connector *connector; 2758 struct drm_connector_list_iter iter; 2759 struct drm_dp_mst_topology_mgr *mgr; 2760 2761 drm_connector_list_iter_begin(dev, &iter); 2762 drm_for_each_connector_iter(connector, &iter) { 2763 2764 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 2765 continue; 2766 2767 aconnector = to_amdgpu_dm_connector(connector); 2768 if (aconnector->dc_link->type != dc_connection_mst_branch || 2769 aconnector->mst_root) 2770 continue; 2771 2772 mgr = &aconnector->mst_mgr; 2773 2774 if (suspend) { 2775 drm_dp_mst_topology_mgr_suspend(mgr); 2776 } else { 2777 /* if extended timeout is supported in hardware, 2778 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer 2779 * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
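* If no LTTPR is detected afterwards, the standard AUX timeout is restored below.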
2780 */ 2781 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); 2782 if (!dp_is_lttpr_present(aconnector->dc_link)) 2783 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); 2784 2785 /* TODO: move resume_mst_branch_status() into drm mst resume again 2786 * once topology probing work is pulled out from mst resume into mst 2787 * resume 2nd step. mst resume 2nd step should be called after old 2788 * state getting restored (i.e. drm_atomic_helper_resume()). 2789 */ 2790 resume_mst_branch_status(mgr); 2791 } 2792 } 2793 drm_connector_list_iter_end(&iter); 2794 } 2795 2796 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) 2797 { 2798 int ret = 0; 2799 2800 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends 2801 * on the Windows driver dc implementation. 2802 * For Navi1x, clock settings of dcn watermarks are fixed; the settings 2803 * should be passed to smu during boot up and resume from s3. 2804 * boot up: dc calculates dcn watermark clock settings within dc_create, 2805 * dcn20_resource_construct 2806 * then calls the pplib functions below to pass the settings to smu: 2807 * smu_set_watermarks_for_clock_ranges 2808 * smu_set_watermarks_table 2809 * navi10_set_watermarks_table 2810 * smu_write_watermarks_table 2811 * 2812 * For Renoir, clock settings of dcn watermarks are also fixed values. 2813 * dc has implemented a different flow for the Windows driver: 2814 * dc_hardware_init / dc_set_power_state 2815 * dcn10_init_hw 2816 * notify_wm_ranges 2817 * set_wm_ranges 2818 * -- Linux 2819 * smu_set_watermarks_for_clock_ranges 2820 * renoir_set_watermarks_table 2821 * smu_write_watermarks_table 2822 * 2823 * For Linux, 2824 * dc_hardware_init -> amdgpu_dm_init 2825 * dc_set_power_state --> dm_resume 2826 * 2827 * therefore, this function applies to navi10/12/14 but not Renoir. 2828 * 2829 */ 2830 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2831 case IP_VERSION(2, 0, 2): 2832 case IP_VERSION(2, 0, 0): 2833 break; 2834 default: 2835 return 0; 2836 } 2837 2838 ret = amdgpu_dpm_write_watermarks_table(adev); 2839 if (ret) { 2840 DRM_ERROR("Failed to update WMTABLE!\n"); 2841 return ret; 2842 } 2843 2844 return 0; 2845 } 2846 2847 /** 2848 * dm_hw_init() - Initialize DC device 2849 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 2850 * 2851 * Initialize the &struct amdgpu_display_manager device. This involves calling 2852 * the initializers of each DM component, then populating the struct with them. 2853 * 2854 * Although the function implies hardware initialization, both hardware and 2855 * software are initialized here. Splitting them out to their relevant init 2856 * hooks is a future TODO item. 2857 * 2858 * Some notable things that are initialized here: 2859 * 2860 * - Display Core, both software and hardware 2861 * - DC modules that we need (freesync and color management) 2862 * - DRM software states 2863 * - Interrupt sources and handlers 2864 * - Vblank support 2865 * - Debug FS entries, if enabled 2866 */ 2867 static int dm_hw_init(struct amdgpu_ip_block *ip_block) 2868 { 2869 struct amdgpu_device *adev = ip_block->adev; 2870 int r; 2871 2872 /* Create DAL display manager */ 2873 r = amdgpu_dm_init(adev); 2874 if (r) 2875 return r; 2876 amdgpu_dm_hpd_init(adev); 2877 2878 return 0; 2879 } 2880 2881 /** 2882 * dm_hw_fini() - Teardown DC device 2883 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
2884 * 2885 * Teardown components within &struct amdgpu_display_manager that require 2886 * cleanup. This involves cleaning up the DRM device, DC, and any modules that 2887 * were loaded. Also flush IRQ workqueues and disable them. 2888 */ 2889 static int dm_hw_fini(struct amdgpu_ip_block *ip_block) 2890 { 2891 struct amdgpu_device *adev = ip_block->adev; 2892 2893 amdgpu_dm_hpd_fini(adev); 2894 2895 amdgpu_dm_irq_fini(adev); 2896 amdgpu_dm_fini(adev); 2897 return 0; 2898 } 2899 2900 2901 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, 2902 struct dc_state *state, bool enable) 2903 { 2904 enum dc_irq_source irq_source; 2905 struct amdgpu_crtc *acrtc; 2906 int rc = -EBUSY; 2907 int i = 0; 2908 2909 for (i = 0; i < state->stream_count; i++) { 2910 acrtc = get_crtc_by_otg_inst( 2911 adev, state->stream_status[i].primary_otg_inst); 2912 2913 if (acrtc && state->stream_status[i].plane_count != 0) { 2914 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; 2915 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; 2916 if (rc) 2917 DRM_WARN("Failed to %s pflip interrupts\n", 2918 enable ? "enable" : "disable"); 2919 2920 if (enable) { 2921 if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state))) 2922 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true); 2923 } else 2924 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); 2925 2926 if (rc) 2927 DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); 2928 2929 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; 2930 /* During gpu-reset we disable and then enable vblank irq, so 2931 * don't use amdgpu_irq_get/put() to avoid refcount change. 2932 */ 2933 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) 2934 DRM_WARN("Failed to %sable vblank interrupt\n", enable ? 
"en" : "dis"); 2935 } 2936 } 2937 2938 } 2939 2940 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) 2941 { 2942 struct dc_state *context = NULL; 2943 enum dc_status res = DC_ERROR_UNEXPECTED; 2944 int i; 2945 struct dc_stream_state *del_streams[MAX_PIPES]; 2946 int del_streams_count = 0; 2947 struct dc_commit_streams_params params = {}; 2948 2949 memset(del_streams, 0, sizeof(del_streams)); 2950 2951 context = dc_state_create_current_copy(dc); 2952 if (context == NULL) 2953 goto context_alloc_fail; 2954 2955 /* First remove from context all streams */ 2956 for (i = 0; i < context->stream_count; i++) { 2957 struct dc_stream_state *stream = context->streams[i]; 2958 2959 del_streams[del_streams_count++] = stream; 2960 } 2961 2962 /* Remove all planes for removed streams and then remove the streams */ 2963 for (i = 0; i < del_streams_count; i++) { 2964 if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) { 2965 res = DC_FAIL_DETACH_SURFACES; 2966 goto fail; 2967 } 2968 2969 res = dc_state_remove_stream(dc, context, del_streams[i]); 2970 if (res != DC_OK) 2971 goto fail; 2972 } 2973 2974 params.streams = context->streams; 2975 params.stream_count = context->stream_count; 2976 res = dc_commit_streams(dc, ¶ms); 2977 2978 fail: 2979 dc_state_release(context); 2980 2981 context_alloc_fail: 2982 return res; 2983 } 2984 2985 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) 2986 { 2987 int i; 2988 2989 if (dm->hpd_rx_offload_wq) { 2990 for (i = 0; i < dm->dc->caps.max_links; i++) 2991 flush_workqueue(dm->hpd_rx_offload_wq[i].wq); 2992 } 2993 } 2994 2995 static int dm_suspend(struct amdgpu_ip_block *ip_block) 2996 { 2997 struct amdgpu_device *adev = ip_block->adev; 2998 struct amdgpu_display_manager *dm = &adev->dm; 2999 int ret = 0; 3000 3001 if (amdgpu_in_reset(adev)) { 3002 mutex_lock(&dm->dc_lock); 3003 3004 dc_allow_idle_optimizations(adev->dm.dc, false); 3005 3006 dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state); 3007 3008 if (dm->cached_dc_state) 3009 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); 3010 3011 amdgpu_dm_commit_zero_streams(dm->dc); 3012 3013 amdgpu_dm_irq_suspend(adev); 3014 3015 hpd_rx_irq_work_suspend(dm); 3016 3017 return ret; 3018 } 3019 3020 WARN_ON(adev->dm.cached_state); 3021 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); 3022 if (IS_ERR(adev->dm.cached_state)) 3023 return PTR_ERR(adev->dm.cached_state); 3024 3025 s3_handle_mst(adev_to_drm(adev), true); 3026 3027 amdgpu_dm_irq_suspend(adev); 3028 3029 hpd_rx_irq_work_suspend(dm); 3030 3031 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); 3032 3033 if (dm->dc->caps.ips_support && adev->in_s0ix) 3034 dc_allow_idle_optimizations(dm->dc, true); 3035 3036 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); 3037 3038 return 0; 3039 } 3040 3041 struct drm_connector * 3042 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, 3043 struct drm_crtc *crtc) 3044 { 3045 u32 i; 3046 struct drm_connector_state *new_con_state; 3047 struct drm_connector *connector; 3048 struct drm_crtc *crtc_from_state; 3049 3050 for_each_new_connector_in_state(state, connector, new_con_state, i) { 3051 crtc_from_state = new_con_state->crtc; 3052 3053 if (crtc_from_state == crtc) 3054 return connector; 3055 } 3056 3057 return NULL; 3058 } 3059 3060 static void emulated_link_detect(struct dc_link *link) 3061 { 3062 struct dc_sink_init_data sink_init_data = { 0 }; 3063 struct 
display_sink_capability sink_caps = { 0 }; 3064 enum dc_edid_status edid_status; 3065 struct dc_context *dc_ctx = link->ctx; 3066 struct drm_device *dev = adev_to_drm(dc_ctx->driver_context); 3067 struct dc_sink *sink = NULL; 3068 struct dc_sink *prev_sink = NULL; 3069 3070 link->type = dc_connection_none; 3071 prev_sink = link->local_sink; 3072 3073 if (prev_sink) 3074 dc_sink_release(prev_sink); 3075 3076 switch (link->connector_signal) { 3077 case SIGNAL_TYPE_HDMI_TYPE_A: { 3078 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3079 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; 3080 break; 3081 } 3082 3083 case SIGNAL_TYPE_DVI_SINGLE_LINK: { 3084 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3085 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; 3086 break; 3087 } 3088 3089 case SIGNAL_TYPE_DVI_DUAL_LINK: { 3090 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3091 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; 3092 break; 3093 } 3094 3095 case SIGNAL_TYPE_LVDS: { 3096 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3097 sink_caps.signal = SIGNAL_TYPE_LVDS; 3098 break; 3099 } 3100 3101 case SIGNAL_TYPE_EDP: { 3102 sink_caps.transaction_type = 3103 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 3104 sink_caps.signal = SIGNAL_TYPE_EDP; 3105 break; 3106 } 3107 3108 case SIGNAL_TYPE_DISPLAY_PORT: { 3109 sink_caps.transaction_type = 3110 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 3111 sink_caps.signal = SIGNAL_TYPE_VIRTUAL; 3112 break; 3113 } 3114 3115 default: 3116 drm_err(dev, "Invalid connector type! signal:%d\n", 3117 link->connector_signal); 3118 return; 3119 } 3120 3121 sink_init_data.link = link; 3122 sink_init_data.sink_signal = sink_caps.signal; 3123 3124 sink = dc_sink_create(&sink_init_data); 3125 if (!sink) { 3126 drm_err(dev, "Failed to create sink!\n"); 3127 return; 3128 } 3129 3130 /* dc_sink_create returns a new reference */ 3131 link->local_sink = sink; 3132 3133 edid_status = dm_helpers_read_local_edid( 3134 link->ctx, 3135 link, 3136 sink); 3137 3138 if (edid_status != EDID_OK) 3139 drm_err(dev, "Failed to read EDID\n"); 3140 3141 } 3142 3143 static void dm_gpureset_commit_state(struct dc_state *dc_state, 3144 struct amdgpu_display_manager *dm) 3145 { 3146 struct { 3147 struct dc_surface_update surface_updates[MAX_SURFACES]; 3148 struct dc_plane_info plane_infos[MAX_SURFACES]; 3149 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 3150 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 3151 struct dc_stream_update stream_update; 3152 } *bundle; 3153 int k, m; 3154 3155 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 3156 3157 if (!bundle) { 3158 drm_err(dm->ddev, "Failed to allocate update bundle\n"); 3159 goto cleanup; 3160 } 3161 3162 for (k = 0; k < dc_state->stream_count; k++) { 3163 bundle->stream_update.stream = dc_state->streams[k]; 3164 3165 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) { 3166 bundle->surface_updates[m].surface = 3167 dc_state->stream_status[k].plane_states[m]; 3168 bundle->surface_updates[m].surface->force_full_update = 3169 true; 3170 } 3171 3172 update_planes_and_stream_adapter(dm->dc, 3173 UPDATE_TYPE_FULL, 3174 dc_state->stream_status[k].plane_count, 3175 dc_state->streams[k], 3176 &bundle->stream_update, 3177 bundle->surface_updates); 3178 } 3179 3180 cleanup: 3181 kfree(bundle); 3182 } 3183 3184 static int dm_resume(struct amdgpu_ip_block *ip_block) 3185 { 3186 struct amdgpu_device *adev = ip_block->adev; 3187 struct drm_device *ddev = adev_to_drm(adev); 3188 struct amdgpu_display_manager *dm = &adev->dm; 3189 struct
amdgpu_dm_connector *aconnector; 3190 struct drm_connector *connector; 3191 struct drm_connector_list_iter iter; 3192 struct drm_crtc *crtc; 3193 struct drm_crtc_state *new_crtc_state; 3194 struct dm_crtc_state *dm_new_crtc_state; 3195 struct drm_plane *plane; 3196 struct drm_plane_state *new_plane_state; 3197 struct dm_plane_state *dm_new_plane_state; 3198 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); 3199 enum dc_connection_type new_connection_type = dc_connection_none; 3200 struct dc_state *dc_state; 3201 int i, r, j; 3202 struct dc_commit_streams_params commit_params = {}; 3203 3204 if (dm->dc->caps.ips_support) { 3205 dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false); 3206 } 3207 3208 if (amdgpu_in_reset(adev)) { 3209 dc_state = dm->cached_dc_state; 3210 3211 /* 3212 * The dc->current_state is backed up into dm->cached_dc_state 3213 * before we commit 0 streams. 3214 * 3215 * DC will clear link encoder assignments on the real state 3216 * but the changes won't propagate over to the copy we made 3217 * before the 0 streams commit. 3218 * 3219 * DC expects that link encoder assignments are *not* valid 3220 * when committing a state, so as a workaround we can copy 3221 * off of the current state. 3222 * 3223 * We lose the previous assignments, but we had already 3224 * committed 0 streams anyway. 3225 */ 3226 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); 3227 3228 r = dm_dmub_hw_init(adev); 3229 if (r) 3230 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 3231 3232 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); 3233 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 3234 3235 dc_resume(dm->dc); 3236 3237 amdgpu_dm_irq_resume_early(adev); 3238 3239 for (i = 0; i < dc_state->stream_count; i++) { 3240 dc_state->streams[i]->mode_changed = true; 3241 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { 3242 dc_state->stream_status[i].plane_states[j]->update_flags.raw 3243 = 0xffffffff; 3244 } 3245 } 3246 3247 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 3248 amdgpu_dm_outbox_init(adev); 3249 dc_enable_dmub_outbox(adev->dm.dc); 3250 } 3251 3252 commit_params.streams = dc_state->streams; 3253 commit_params.stream_count = dc_state->stream_count; 3254 dc_exit_ips_for_hw_access(dm->dc); 3255 WARN_ON(!dc_commit_streams(dm->dc, &commit_params)); 3256 3257 dm_gpureset_commit_state(dm->cached_dc_state, dm); 3258 3259 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); 3260 3261 dc_state_release(dm->cached_dc_state); 3262 dm->cached_dc_state = NULL; 3263 3264 amdgpu_dm_irq_resume_late(adev); 3265 3266 mutex_unlock(&dm->dc_lock); 3267 3268 return 0; 3269 } 3270 /* Recreate dc_state - DC invalidates it when setting power state to S3. */ 3271 dc_state_release(dm_state->context); 3272 dm_state->context = dc_state_create(dm->dc, NULL); 3273 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ 3274 3275 /* Before powering on DC we need to re-initialize DMUB. */ 3276 dm_dmub_hw_resume(adev); 3277 3278 /* Re-enable outbox interrupts for DPIA.
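* The outbox carries DMUB-to-driver notifications such as AUX replies and HPD events.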
*/ 3279 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 3280 amdgpu_dm_outbox_init(adev); 3281 dc_enable_dmub_outbox(adev->dm.dc); 3282 } 3283 3284 /* power on hardware */ 3285 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); 3286 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 3287 3288 /* program HPD filter */ 3289 dc_resume(dm->dc); 3290 3291 /* 3292 * early enable HPD Rx IRQ, should be done before set mode as short 3293 * pulse interrupts are used for MST 3294 */ 3295 amdgpu_dm_irq_resume_early(adev); 3296 3297 /* On resume we need to rewrite the MSTM control bits to enable MST */ 3298 s3_handle_mst(ddev, false); 3299 3300 /* Do detection */ 3301 drm_connector_list_iter_begin(ddev, &iter); 3302 drm_for_each_connector_iter(connector, &iter) { 3303 3304 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 3305 continue; 3306 3307 aconnector = to_amdgpu_dm_connector(connector); 3308 3309 if (!aconnector->dc_link) 3310 continue; 3311 3312 /* 3313 * this is the case when traversing through already created end sink 3314 * MST connectors; they should be skipped 3315 */ 3316 if (aconnector->mst_root) 3317 continue; 3318 3319 mutex_lock(&aconnector->hpd_lock); 3320 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 3321 DRM_ERROR("KMS: Failed to detect connector\n"); 3322 3323 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3324 emulated_link_detect(aconnector->dc_link); 3325 } else { 3326 mutex_lock(&dm->dc_lock); 3327 dc_exit_ips_for_hw_access(dm->dc); 3328 dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4); 3329 mutex_unlock(&dm->dc_lock); 3330 } 3331 3332 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 3333 aconnector->fake_enable = false; 3334 3335 if (aconnector->dc_sink) 3336 dc_sink_release(aconnector->dc_sink); 3337 aconnector->dc_sink = NULL; 3338 amdgpu_dm_update_connector_after_detect(aconnector); 3339 mutex_unlock(&aconnector->hpd_lock); 3340 } 3341 drm_connector_list_iter_end(&iter); 3342 3343 /* Force mode set in atomic commit */ 3344 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 3345 new_crtc_state->active_changed = true; 3346 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 3347 reset_freesync_config_for_crtc(dm_new_crtc_state); 3348 } 3349 3350 /* 3351 * atomic_check is expected to create the dc states. We need to release 3352 * them here, since they were duplicated as part of the suspend 3353 * procedure.
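* Holding an extra reference here would trip the refcount WARN_ONs below.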
3354 */ 3355 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 3356 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 3357 if (dm_new_crtc_state->stream) { 3358 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); 3359 dc_stream_release(dm_new_crtc_state->stream); 3360 dm_new_crtc_state->stream = NULL; 3361 } 3362 dm_new_crtc_state->base.color_mgmt_changed = true; 3363 } 3364 3365 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { 3366 dm_new_plane_state = to_dm_plane_state(new_plane_state); 3367 if (dm_new_plane_state->dc_state) { 3368 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); 3369 dc_plane_state_release(dm_new_plane_state->dc_state); 3370 dm_new_plane_state->dc_state = NULL; 3371 } 3372 } 3373 3374 drm_atomic_helper_resume(ddev, dm->cached_state); 3375 3376 dm->cached_state = NULL; 3377 3378 /* Do mst topology probing after resuming cached state */ 3379 drm_connector_list_iter_begin(ddev, &iter); 3380 drm_for_each_connector_iter(connector, &iter) { 3381 3382 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 3383 continue; 3384 3385 aconnector = to_amdgpu_dm_connector(connector); 3386 if (aconnector->dc_link->type != dc_connection_mst_branch || 3387 aconnector->mst_root) 3388 continue; 3389 3390 drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); 3391 } 3392 drm_connector_list_iter_end(&iter); 3393 3394 amdgpu_dm_irq_resume_late(adev); 3395 3396 amdgpu_dm_smu_write_watermarks_table(adev); 3397 3398 drm_kms_helper_hotplug_event(ddev); 3399 3400 return 0; 3401 } 3402 3403 /** 3404 * DOC: DM Lifecycle 3405 * 3406 * DM (and consequently DC) is registered in the amdgpu base driver as an IP 3407 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to 3408 * the base driver's device list to be initialized and torn down accordingly. 3409 * 3410 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
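* For DM they are implemented by &amdgpu_dm_funcs and registered through &dm_ip_block below.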
3411 */ 3412 3413 static const struct amd_ip_funcs amdgpu_dm_funcs = { 3414 .name = "dm", 3415 .early_init = dm_early_init, 3416 .late_init = dm_late_init, 3417 .sw_init = dm_sw_init, 3418 .sw_fini = dm_sw_fini, 3419 .early_fini = amdgpu_dm_early_fini, 3420 .hw_init = dm_hw_init, 3421 .hw_fini = dm_hw_fini, 3422 .suspend = dm_suspend, 3423 .resume = dm_resume, 3424 .is_idle = dm_is_idle, 3425 .wait_for_idle = dm_wait_for_idle, 3426 .check_soft_reset = dm_check_soft_reset, 3427 .soft_reset = dm_soft_reset, 3428 .set_clockgating_state = dm_set_clockgating_state, 3429 .set_powergating_state = dm_set_powergating_state, 3430 }; 3431 3432 const struct amdgpu_ip_block_version dm_ip_block = { 3433 .type = AMD_IP_BLOCK_TYPE_DCE, 3434 .major = 1, 3435 .minor = 0, 3436 .rev = 0, 3437 .funcs = &amdgpu_dm_funcs, 3438 }; 3439 3440 3441 /** 3442 * DOC: atomic 3443 * 3444 * *WIP* 3445 */ 3446 3447 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 3448 .fb_create = amdgpu_display_user_framebuffer_create, 3449 .get_format_info = amdgpu_dm_plane_get_format_info, 3450 .atomic_check = amdgpu_dm_atomic_check, 3451 .atomic_commit = drm_atomic_helper_commit, 3452 }; 3453 3454 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { 3455 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, 3456 .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, 3457 }; 3458 3459 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) 3460 { 3461 struct amdgpu_dm_backlight_caps *caps; 3462 struct drm_connector *conn_base; 3463 struct amdgpu_device *adev; 3464 struct drm_luminance_range_info *luminance_range; 3465 3466 if (aconnector->bl_idx == -1 || 3467 aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP) 3468 return; 3469 3470 conn_base = &aconnector->base; 3471 adev = drm_to_adev(conn_base->dev); 3472 3473 caps = &adev->dm.backlight_caps[aconnector->bl_idx]; 3474 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; 3475 caps->aux_support = false; 3476 3477 if (caps->ext_caps->bits.oled == 1 3478 /* 3479 * || 3480 * caps->ext_caps->bits.sdr_aux_backlight_control == 1 || 3481 * caps->ext_caps->bits.hdr_aux_backlight_control == 1 3482 */) 3483 caps->aux_support = true; 3484 3485 if (amdgpu_backlight == 0) 3486 caps->aux_support = false; 3487 else if (amdgpu_backlight == 1) 3488 caps->aux_support = true; 3489 if (caps->aux_support) 3490 aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX; 3491 3492 luminance_range = &conn_base->display_info.luminance_range; 3493 3494 if (luminance_range->max_luminance) { 3495 caps->aux_min_input_signal = luminance_range->min_luminance; 3496 caps->aux_max_input_signal = luminance_range->max_luminance; 3497 } else { 3498 caps->aux_min_input_signal = 0; 3499 caps->aux_max_input_signal = 512; 3500 } 3501 } 3502 3503 void amdgpu_dm_update_connector_after_detect( 3504 struct amdgpu_dm_connector *aconnector) 3505 { 3506 struct drm_connector *connector = &aconnector->base; 3507 struct drm_device *dev = connector->dev; 3508 struct dc_sink *sink; 3509 3510 /* MST handled by drm_mst framework */ 3511 if (aconnector->mst_mgr.mst_state) 3512 return; 3513 3514 sink = aconnector->dc_link->local_sink; 3515 if (sink) 3516 dc_sink_retain(sink); 3517 3518 /* 3519 * EDID mgmt connector gets its first update only in the mode_valid hook, and then 3520 * the connector sink is set to either the fake or the physical sink depending on link status. 3521 * Skip if already done during boot.
3522 */ 3523 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED 3524 && aconnector->dc_em_sink) { 3525 3526 /* 3527 * For S3 resume with headless, use the emulated sink (dc_em_sink) to fake a stream 3528 * because on resume connector->sink is set to NULL 3529 */ 3530 mutex_lock(&dev->mode_config.mutex); 3531 3532 if (sink) { 3533 if (aconnector->dc_sink) { 3534 amdgpu_dm_update_freesync_caps(connector, NULL); 3535 /* 3536 * The retain and release below are used to 3537 * bump up the refcount for the sink because the link doesn't point 3538 * to it anymore after disconnect; otherwise, on the next crtc-to-connector 3539 * reshuffle by UMD we would get an unwanted dc_sink release 3540 */ 3541 dc_sink_release(aconnector->dc_sink); 3542 } 3543 aconnector->dc_sink = sink; 3544 dc_sink_retain(aconnector->dc_sink); 3545 amdgpu_dm_update_freesync_caps(connector, 3546 aconnector->drm_edid); 3547 } else { 3548 amdgpu_dm_update_freesync_caps(connector, NULL); 3549 if (!aconnector->dc_sink) { 3550 aconnector->dc_sink = aconnector->dc_em_sink; 3551 dc_sink_retain(aconnector->dc_sink); 3552 } 3553 } 3554 3555 mutex_unlock(&dev->mode_config.mutex); 3556 3557 if (sink) 3558 dc_sink_release(sink); 3559 return; 3560 } 3561 3562 /* 3563 * TODO: temporary guard to look for proper fix 3564 * if this sink is MST sink, we should not do anything 3565 */ 3566 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 3567 dc_sink_release(sink); 3568 return; 3569 } 3570 3571 if (aconnector->dc_sink == sink) { 3572 /* 3573 * We got a DP short pulse (Link Loss, DP CTS, etc...). 3574 * Do nothing!! 3575 */ 3576 drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n", 3577 aconnector->connector_id); 3578 if (sink) 3579 dc_sink_release(sink); 3580 return; 3581 } 3582 3583 drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", 3584 aconnector->connector_id, aconnector->dc_sink, sink); 3585 3586 mutex_lock(&dev->mode_config.mutex); 3587 3588 /* 3589 * 1. Update status of the drm connector 3590 * 2. Send an event and let userspace tell us what to do 3591 */ 3592 if (sink) { 3593 /* 3594 * TODO: check if we still need the S3 mode update workaround. 3595 * If yes, put it here.
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			dc_sink_release(aconnector->dc_sink);
		}

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->drm_edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
			}
		} else {
			const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid;

			aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length);
			drm_edid_connector_update(connector, aconnector->drm_edid);

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_attach(&aconnector->dm_dp_aux.aux,
						  connector->display_info.source_physical_address);
		}

		if (!aconnector->timing_requested) {
			aconnector->timing_requested =
				kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
			if (!aconnector->timing_requested)
				drm_err(dev,
					"failed to create aconnector->timing_requested\n");
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		drm_edid_free(aconnector->drm_edid);
		aconnector->drm_edid = NULL;
		kfree(aconnector->timing_requested);
		aconnector->timing_requested = NULL;
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}
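/*
 * HPD servicing comes in two flavours below: a "long pulse" on the HPD
 * pin (handled here) signals a plug/unplug and triggers a full link
 * detection, while a "short pulse" on HPD_RX (handled further down in
 * handle_hpd_rx_irq()) carries DP sink events such as link loss,
 * automated test requests and MST messages.
 */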
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
	struct dc *dc = aconnector->dc_link->ctx->dc;
	bool ret = false;

	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * In case of failure or MST, there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	aconnector->timing_changed = false;

	if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_connector_hotplug_event(connector);
	} else {
		mutex_lock(&adev->dm.dc_lock);
		dc_exit_ips_for_hw_access(dc);
		ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
		mutex_unlock(&adev->dm.dc_lock);
		if (ret) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
				drm_kms_helper_connector_hotplug_event(connector);
		}
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;

	handle_hpd_irq_helper(aconnector);
}

static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
					 union hpd_irq_data hpd_irq_data)
{
	struct hpd_rx_irq_offload_work *offload_work =
				kzalloc(sizeof(*offload_work), GFP_KERNEL);

	if (!offload_work) {
		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
		return;
	}

	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
	offload_work->data = hpd_irq_data;
	offload_work->offload_wq = offload_wq;

	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
}
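/*
 * Anything on the HPD_RX path that may take long (automated test
 * handling, MST up/down messages, link-loss recovery) is punted to the
 * per-link offload workqueue above; the is_handling_* flags checked under
 * offload_lock below ensure only one work item per event type is in
 * flight at a time.
 */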
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	bool result = false;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union hpd_irq_data hpd_irq_data;
	bool link_loss = false;
	bool has_left_work = false;
	int idx = dc_link->link_index;
	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
	struct dc *dc = aconnector->dc_link->ctx->dc;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * TODO: this mutex is a temporary measure to keep the HPD interrupt
	 * from racing on the GPIO; it should be retired once an i2c helper
	 * is implemented.
	 */
	mutex_lock(&aconnector->hpd_lock);

	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
					   &link_loss, true, &has_left_work);

	if (!has_left_work)
		goto out;

	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
		goto out;
	}

	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
		    hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			bool skip = false;

			/*
			 * DOWN_REP_MSG_RDY is also handled by the polling
			 * method mgr->cbs->poll_hpd_irq().
			 */
			spin_lock(&offload_wq->offload_lock);
			skip = offload_wq->is_handling_mst_msg_rdy_event;

			if (!skip)
				offload_wq->is_handling_mst_msg_rdy_event = true;

			spin_unlock(&offload_wq->offload_lock);

			if (!skip)
				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);

			goto out;
		}

		if (link_loss) {
			bool skip = false;

			spin_lock(&offload_wq->offload_lock);
			skip = offload_wq->is_handling_link_loss;

			if (!skip)
				offload_wq->is_handling_link_loss = true;

			spin_unlock(&offload_wq->offload_lock);

			if (!skip)
				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);

			goto out;
		}
	}

out:
	if (result && !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_connector_hotplug_event(connector);
		} else {
			bool ret = false;

			mutex_lock(&adev->dm.dc_lock);
			dc_exit_ips_for_hw_access(dc);
			ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
			mutex_unlock(&adev->dm.dc_lock);

			if (ret) {
				if (aconnector->fake_enable)
					aconnector->fake_enable = false;

				amdgpu_dm_update_connector_after_detect(aconnector);

				drm_modeset_lock_all(dev);
				dm_restore_drm_connector_state(dev, connector);
				drm_modeset_unlock_all(dev);

				drm_kms_helper_connector_hotplug_event(connector);
			}
		}
	}
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}

	if (dc_link->type != dc_connection_mst_branch)
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);

	mutex_unlock(&aconnector->hpd_lock);
}
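/*
 * On ASICs with a DMUB outbox, HPD and HPD_RX events arrive as DMUB
 * notifications and are hooked up via register_dmub_notify_callback();
 * on everything else they are wired directly to the per-link DC
 * interrupt sources in the loop below.
 */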
static int register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
						   dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register DMUB HPD callback");
			return -EINVAL;
		}

		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
						   dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register DMUB HPD IRQ callback");
			return -EINVAL;
		}

		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
						   dmub_hpd_sense_callback, true)) {
			DRM_ERROR("amdgpu: failed to register DMUB HPD sense callback");
			return -EINVAL;
		}
	}

	list_for_each_entry(connector,
			    &dev->mode_config.connector_list, head) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
			    int_params.irq_source < DC_IRQ_SOURCE_HPD1 ||
			    int_params.irq_source > DC_IRQ_SOURCE_HPD6) {
				DRM_ERROR("Failed to register hpd irq!\n");
				return -EINVAL;
			}

			if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
							      handle_hpd_irq, (void *) aconnector))
				return -ENOMEM;
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
			    int_params.irq_source < DC_IRQ_SOURCE_HPD1RX ||
			    int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) {
				DRM_ERROR("Failed to register hpd rx irq!\n");
				return -EINVAL;
			}

			if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
							      handle_hpd_rx_irq, (void *) aconnector))
				return -ENOMEM;
		}
	}
	return 0;
}
#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
		    int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
		    int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
			DRM_ERROR("Failed to register vblank irq!\n");
			return -EINVAL;
		}

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
						      dm_crtc_high_irq, c_irq_params))
			return -ENOMEM;
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
	     i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
		    int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
		    int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
			DRM_ERROR("Failed to register pflip irq!\n");
			return -EINVAL;
		}

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
						      dm_pflip_high_irq, c_irq_params))
			return -ENOMEM;
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			      VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	r = register_hpd_handlers(adev);

	return r;
}
#endif
/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->family >= AMDGPU_FAMILY_AI)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
		    int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
		    int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
			DRM_ERROR("Failed to register vblank irq!\n");
			return -EINVAL;
		}

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
						      dm_crtc_high_irq, c_irq_params))
			return -ENOMEM;
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
		    int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
		    int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
			DRM_ERROR("Failed to register vupdate irq!\n");
			return -EINVAL;
		}

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
						      dm_vupdate_high_irq, c_irq_params))
			return -ENOMEM;
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
	     i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
		    int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
		    int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
			DRM_ERROR("Failed to register pflip irq!\n");
			return -EINVAL;
		}

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
						      dm_pflip_high_irq, c_irq_params))
			return -ENOMEM;
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			      VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	r = register_hpd_handlers(adev);

	return r;
}
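/*
 * DCN differs from DCE in two ways that matter for IRQ wiring: VSTARTUP
 * takes over the role of the DCE VBLANK interrupt as the per-CRTC event
 * source, and VUPDATE_NO_LOCK is used instead of plain VUPDATE so the
 * event fires independently of the OTG lock state (see the comment
 * further down).
 */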
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	static const unsigned int vrtl_int_srcid[] = {
		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
	};
#endif

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
	     i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
		    int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
		    int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
			DRM_ERROR("Failed to register vblank irq!\n");
			return -EINVAL;
		}

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
						      dm_crtc_high_irq, c_irq_params))
			return -ENOMEM;
	}

	/* Use otg vertical line interrupt */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
				      vrtl_int_srcid[i], &adev->vline0_irq);

		if (r) {
			DRM_ERROR("Failed to add vline0 irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
		    int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 ||
		    int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) {
			DRM_ERROR("Failed to register vline0 irq!\n");
			return -EINVAL;
		}

		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
						       - DC_IRQ_SOURCE_DC1_VLINE0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
						      dm_dcn_vertical_interrupt0_high_irq,
						      c_irq_params))
			return -ENOMEM;
	}
#endif

	/*
	 * Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
		    int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
		    int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
			DRM_ERROR("Failed to register vupdate irq!\n");
			return -EINVAL;
		}

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
						      dm_vupdate_high_irq, c_irq_params))
			return -ENOMEM;
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
	     i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
		    int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
		    int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
			DRM_ERROR("Failed to register pflip irq!\n");
			return -EINVAL;
		}

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
						      dm_pflip_high_irq, c_irq_params))
			return -ENOMEM;
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			      &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	r = register_hpd_handlers(adev);

	return r;
}
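/*
 * The DMUB outbox is the firmware-to-driver notification channel. The
 * single low-priority-ready interrupt registered below is what delivers
 * DMUB notifications (including the HPD callbacks registered in
 * register_hpd_handlers()) to the driver.
 */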
/* Register Outbox IRQ sources and initialize IRQ callbacks */
static int register_outbox_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r, i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
			      &adev->dmub_outbox_irq);
	if (r) {
		DRM_ERROR("Failed to add outbox irq id!\n");
		return r;
	}

	if (dc->ctx->dmub_srv) {
		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.dmub_outbox_params[0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
						      dm_dmub_outbox1_low_irq, c_irq_params))
			return -ENOMEM;
	}

	return 0;
}

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
int dm_atomic_get_state(struct drm_atomic_state *state,
			struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_state_create_copy(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_state_release(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};
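/*
 * Illustrative sketch (not driver code): a caller in the atomic-check
 * path acquires the global DC state lazily through the private object
 * initialized below, roughly like so:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context may now be modified for this commit
 */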
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	if (adev->asic_type == CHIP_HAWAII)
		/* disable prefer shadow for now due to hibernation issues */
		adev_to_drm(adev)->mode_config.prefer_shadow = 0;
	else
		adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_state_create_current_copy(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_state_release(state->context);
		kfree(state);
		return r;
	}

#ifdef AMD_PRIVATE_COLOR
	if (amdgpu_dm_create_color_properties(adev)) {
		dc_state_release(state->context);
		kfree(state);
		return -ENOMEM;
	}
#endif

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_state_release(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2)
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
					    int bl_idx)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps[bl_idx].caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(&caps);

	/* validate the firmware value is sane */
	if (caps.caps_valid) {
		int spread = caps.max_input_signal - caps.min_input_signal;

		if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
		    caps.min_input_signal < 0 ||
		    spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
		    spread < AMDGPU_DM_MIN_SPREAD) {
			DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n",
				      caps.min_input_signal, caps.max_input_signal);
			caps.caps_valid = false;
		}
	}

	if (caps.caps_valid) {
		dm->backlight_caps[bl_idx].caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps[bl_idx].min_input_signal =
			AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps[bl_idx].max_input_signal =
			AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps[bl_idx].aux_support)
		return;

	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
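/*
 * Worked example for the conversions below, using the default 8-bit caps
 * of min=12 and max=255: the PWM path scales both bounds by 0x101, so a
 * full user level of 255 maps to 255 * 0x101 = 0xffff, exactly the top
 * of the 16-bit PWM range.
 */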
static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned int *min, unsigned int *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned int min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned int min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}

static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
					  int bl_idx,
					  u32 user_brightness)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link;
	u32 brightness;
	bool rc, reallow_idle = false;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	dm->brightness[bl_idx] = user_brightness;
	/* update scratch register */
	if (bl_idx == 0)
		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
	link = (struct dc_link *)dm->backlight_link[bl_idx];

	/* Change brightness based on AUX property */
	mutex_lock(&dm->dc_lock);
	if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
		dc_allow_idle_optimizations(dm->dc, false);
		reallow_idle = true;
	}

	if (caps.aux_support) {
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
	} else {
		struct set_backlight_level_params backlight_level_params = { 0 };

		backlight_level_params.backlight_pwm_u16_16 = brightness;
		backlight_level_params.transition_time_in_ms = 0;

		rc = dc_link_set_backlight_level(link, &backlight_level_params);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
	}

	if (dm->dc->caps.ips_support && reallow_idle)
		dc_allow_idle_optimizations(dm->dc, true);

	mutex_unlock(&dm->dc_lock);

	if (rc)
		dm->actual_brightness[bl_idx] = user_brightness;
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);

	return 0;
}
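/*
 * Readback mirrors the set path: AUX panels report average luminance in
 * nits, PWM panels a 16-bit level; either way the value is converted back
 * to the 0..AMDGPU_MAX_BL_LEVEL range userspace sees, falling back to the
 * cached brightness when the link query fails.
 */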
static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
					 int bl_idx)
{
	int ret;
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	if (caps.aux_support) {
		u32 avg, peak;
		bool rc;

		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
		if (!rc)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, avg);
	}

	ret = dc_link_get_backlight_level(link);

	if (ret == DC_ERROR_UNEXPECTED)
		return dm->brightness[bl_idx];

	return convert_brightness_to_user(&caps, ret);
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	return amdgpu_dm_backlight_get_level(dm, i);
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
{
	struct drm_device *drm = aconnector->base.dev;
	struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
	struct backlight_properties props = { 0 };
	struct amdgpu_dm_backlight_caps caps = { 0 };
	char bl_name[16];

	if (aconnector->bl_idx == -1)
		return;

	if (!acpi_video_backlight_use_native()) {
		drm_info(drm, "Skipping amdgpu DM backlight registration\n");
		/* Try registering an ACPI video backlight device instead. */
		acpi_video_register_backlight();
		return;
	}

	amdgpu_acpi_get_backlight_caps(&caps);
	if (caps.caps_valid) {
		if (power_supply_is_system_supplied() > 0)
			props.brightness = caps.ac_level;
		else
			props.brightness = caps.dc_level;
	} else
		props.brightness = AMDGPU_MAX_BL_LEVEL;

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 drm->primary->index + aconnector->bl_idx);

	dm->backlight_dev[aconnector->bl_idx] =
		backlight_device_register(bl_name, aconnector->base.kdev, dm,
					  &amdgpu_dm_backlight_ops, &props);

	if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
		DRM_ERROR("DM: Backlight registration failed!\n");
		dm->backlight_dev[aconnector->bl_idx] = NULL;
	} else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
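/*
 * Example of the possible_crtcs encoding used below: primary plane 0 gets
 * the mask 1 << 0 and is therefore tied to CRTC 0 only, while planes with
 * an id at or beyond dc->caps.max_streams (overlays) get 0xff and may be
 * placed on any CRTC.
 */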
static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

static void setup_backlight_device(struct amdgpu_display_manager *dm,
				   struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	int bl_idx = dm->num_of_edps;

	if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
	    link->type == dc_connection_none)
		return;

	if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
		drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
		return;
	}

	aconnector->bl_idx = bl_idx;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
	dm->backlight_link[bl_idx] = link;
	dm->num_of_edps++;

	update_connector_ext_caps(aconnector);
}

static void amdgpu_set_panel_orientation(struct drm_connector *connector);

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The CRTC and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	s32 i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	u32 link_cnt;
	s32 primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;
	bool psr_feature_enabled = false;
	bool replay_feature_enabled = false;
	int max_overlay = dm->dc->caps.max_slave_planes;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	amdgpu_dm_set_irq_funcs(adev);

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	if (primary_planes > AMDGPU_MAX_PLANES) {
		DRM_ERROR("DM: Number of primary planes exceeds AMDGPU_MAX_PLANES\n");
		return -EINVAL;
	}

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}
	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		/* Do not create overlay if MPO disabled */
		if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
			break;

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (max_overlay-- == 0)
			break;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* Use Outbox interrupt */
	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 4):
	case IP_VERSION(3, 1, 5):
	case IP_VERSION(3, 1, 6):
	case IP_VERSION(3, 2, 0):
	case IP_VERSION(3, 2, 1):
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(4, 0, 1):
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
			      amdgpu_ip_version(adev, DCE_HWIP, 0));
	}

	/* Determine whether to enable PSR support by default. */
	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
		case IP_VERSION(3, 5, 0):
		case IP_VERSION(3, 5, 1):
		case IP_VERSION(4, 0, 1):
			psr_feature_enabled = true;
			break;
		default:
			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
			break;
		}
	}
	/* Determine whether to enable Replay support by default. */
	if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
		case IP_VERSION(3, 5, 0):
		case IP_VERSION(3, 5, 1):
			replay_feature_enabled = true;
			break;

		default:
			replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
			break;
		}
	}

	if (link_cnt > MAX_LINKS) {
		DRM_ERROR(
			"KMS: Cannot support more than %d display indexes\n",
			MAX_LINKS);
		goto fail;
	}

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		link = dc_get_link_at_index(dm->dc, i);

		if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
			struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);

			if (!wbcon) {
				DRM_ERROR("KMS: Failed to allocate writeback connector\n");
				continue;
			}

			if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
				DRM_ERROR("KMS: Failed to initialize writeback connector\n");
				kfree(wbcon);
				continue;
			}

			link->psr_settings.psr_feature_enabled = false;
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;

			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		if (dm->hpd_rx_offload_wq)
			dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
				aconnector;

		if (!dc_link_detect_connection_type(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);
		} else {
			bool ret = false;

			mutex_lock(&dm->dc_lock);
			dc_exit_ips_for_hw_access(dm->dc);
			ret = dc_link_detect(link, DETECT_REASON_BOOT);
			mutex_unlock(&dm->dc_lock);

			if (ret) {
				amdgpu_dm_update_connector_after_detect(aconnector);
				setup_backlight_device(dm, aconnector);

				/* Disable PSR if Replay can be enabled */
				if (replay_feature_enabled)
					if (amdgpu_dm_set_replay_caps(link, aconnector))
						psr_feature_enabled = false;

				if (psr_feature_enabled)
					amdgpu_dm_set_psr_caps(link);
			}
		}
		amdgpu_set_panel_orientation(&aconnector->base);
	}
	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
		case IP_VERSION(3, 5, 0):
		case IP_VERSION(3, 5, 1):
		case IP_VERSION(4, 0, 1):
			if (dcn10_register_irq_handlers(dm->adev)) {
				DRM_ERROR("DM: Failed to initialize IRQ\n");
				goto fail;
			}
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
				  amdgpu_ip_version(adev, DCE_HWIP, 0));
			goto fail;
		}
		break;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_atomic_private_obj_fini(&dm->atomic_obj);
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
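/*
 * Debug-only sysfs hook: writing a non-zero value to s3_debug forces a
 * dm_resume() plus a hotplug event, writing zero forces dm_suspend(),
 * allowing the S3 display path to be exercised without an actual system
 * sleep.
 */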
#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	struct amdgpu_ip_block *ip_block;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE);
	if (!ip_block)
		return -EINVAL;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(ip_block);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(ip_block);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_init_microcode(struct amdgpu_device *adev)
{
	char *fw_name_dmub;
	int r;

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(2, 1, 0):
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case IP_VERSION(3, 0, 0):
		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		else
			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case IP_VERSION(3, 0, 1):
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case IP_VERSION(3, 0, 2):
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case IP_VERSION(3, 0, 3):
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;
	case IP_VERSION(3, 1, 4):
		fw_name_dmub = FIRMWARE_DCN_314_DMUB;
		break;
	case IP_VERSION(3, 1, 5):
		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
		break;
	case IP_VERSION(3, 1, 6):
		fw_name_dmub = FIRMWARE_DCN316_DMUB;
		break;
	case IP_VERSION(3, 2, 0):
		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
		break;
	case IP_VERSION(3, 2, 1):
		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
		break;
	case IP_VERSION(3, 5, 0):
		fw_name_dmub = FIRMWARE_DCN_35_DMUB;
		break;
	case IP_VERSION(3, 5, 1):
		fw_name_dmub = FIRMWARE_DCN_351_DMUB;
		break;
	case IP_VERSION(4, 0, 1):
		fw_name_dmub = FIRMWARE_DCN_401_DMUB;
		break;
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}
	r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, AMDGPU_UCODE_REQUIRED,
				 "%s", fw_name_dmub);
	return r;
}
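/*
 * dm_early_init() establishes the per-ASIC display topology (number of
 * CRTCs, HPD lines and digital encoders) from the hardware generation,
 * hooks up the display function table, and requests the DMUB microcode
 * for the detected IP version.
 */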
static int dm_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx = mode_info->atom_context;
	int index = GetIndexIntoMasterTable(DATA, Object_Header);
	u16 data_offset;

	/* if there is no object header, skip DM */
	if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
		dev_info(adev->dev, "No object header, skipping DM\n");
		return -ENOENT;
	}

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:

		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(3, 0, 0):
			adev->mode_info.num_crtc = 6;
			adev->mode_info.num_hpd = 6;
			adev->mode_info.num_dig = 6;
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(3, 0, 2):
			adev->mode_info.num_crtc = 5;
			adev->mode_info.num_hpd = 5;
			adev->mode_info.num_dig = 5;
			break;
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(3, 0, 3):
			adev->mode_info.num_crtc = 2;
			adev->mode_info.num_hpd = 2;
			adev->mode_info.num_dig = 2;
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
5, 0): 5434 case IP_VERSION(3, 5, 1): 5435 case IP_VERSION(4, 0, 1): 5436 adev->mode_info.num_crtc = 4; 5437 adev->mode_info.num_hpd = 4; 5438 adev->mode_info.num_dig = 4; 5439 break; 5440 default: 5441 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", 5442 amdgpu_ip_version(adev, DCE_HWIP, 0)); 5443 return -EINVAL; 5444 } 5445 break; 5446 } 5447 5448 if (adev->mode_info.funcs == NULL) 5449 adev->mode_info.funcs = &dm_display_funcs; 5450 5451 /* 5452 * Note: Do NOT change adev->audio_endpt_rreg and 5453 * adev->audio_endpt_wreg because they are initialised in 5454 * amdgpu_device_init() 5455 */ 5456 #if defined(CONFIG_DEBUG_KERNEL_DC) 5457 device_create_file( 5458 adev_to_drm(adev)->dev, 5459 &dev_attr_s3_debug); 5460 #endif 5461 adev->dc_enabled = true; 5462 5463 return dm_init_microcode(adev); 5464 } 5465 5466 static bool modereset_required(struct drm_crtc_state *crtc_state) 5467 { 5468 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 5469 } 5470 5471 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 5472 { 5473 drm_encoder_cleanup(encoder); 5474 kfree(encoder); 5475 } 5476 5477 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 5478 .destroy = amdgpu_dm_encoder_destroy, 5479 }; 5480 5481 static int 5482 fill_plane_color_attributes(const struct drm_plane_state *plane_state, 5483 const enum surface_pixel_format format, 5484 enum dc_color_space *color_space) 5485 { 5486 bool full_range; 5487 5488 *color_space = COLOR_SPACE_SRGB; 5489 5490 /* DRM color properties only affect non-RGB formats. */ 5491 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 5492 return 0; 5493 5494 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); 5495 5496 switch (plane_state->color_encoding) { 5497 case DRM_COLOR_YCBCR_BT601: 5498 if (full_range) 5499 *color_space = COLOR_SPACE_YCBCR601; 5500 else 5501 *color_space = COLOR_SPACE_YCBCR601_LIMITED; 5502 break; 5503 5504 case DRM_COLOR_YCBCR_BT709: 5505 if (full_range) 5506 *color_space = COLOR_SPACE_YCBCR709; 5507 else 5508 *color_space = COLOR_SPACE_YCBCR709_LIMITED; 5509 break; 5510 5511 case DRM_COLOR_YCBCR_BT2020: 5512 if (full_range) 5513 *color_space = COLOR_SPACE_2020_YCBCR; 5514 else 5515 return -EINVAL; 5516 break; 5517 5518 default: 5519 return -EINVAL; 5520 } 5521 5522 return 0; 5523 } 5524 5525 static int 5526 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 5527 const struct drm_plane_state *plane_state, 5528 const u64 tiling_flags, 5529 struct dc_plane_info *plane_info, 5530 struct dc_plane_address *address, 5531 bool tmz_surface) 5532 { 5533 const struct drm_framebuffer *fb = plane_state->fb; 5534 const struct amdgpu_framebuffer *afb = 5535 to_amdgpu_framebuffer(plane_state->fb); 5536 int ret; 5537 5538 memset(plane_info, 0, sizeof(*plane_info)); 5539 5540 switch (fb->format->format) { 5541 case DRM_FORMAT_C8: 5542 plane_info->format = 5543 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; 5544 break; 5545 case DRM_FORMAT_RGB565: 5546 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; 5547 break; 5548 case DRM_FORMAT_XRGB8888: 5549 case DRM_FORMAT_ARGB8888: 5550 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 5551 break; 5552 case DRM_FORMAT_XRGB2101010: 5553 case DRM_FORMAT_ARGB2101010: 5554 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; 5555 break; 5556 case DRM_FORMAT_XBGR2101010: 5557 case DRM_FORMAT_ABGR2101010: 5558 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; 5559 break; 5560 case DRM_FORMAT_XBGR8888: 5561 case 
DRM_FORMAT_ABGR8888: 5562 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; 5563 break; 5564 case DRM_FORMAT_NV21: 5565 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; 5566 break; 5567 case DRM_FORMAT_NV12: 5568 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; 5569 break; 5570 case DRM_FORMAT_P010: 5571 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; 5572 break; 5573 case DRM_FORMAT_XRGB16161616F: 5574 case DRM_FORMAT_ARGB16161616F: 5575 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; 5576 break; 5577 case DRM_FORMAT_XBGR16161616F: 5578 case DRM_FORMAT_ABGR16161616F: 5579 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; 5580 break; 5581 case DRM_FORMAT_XRGB16161616: 5582 case DRM_FORMAT_ARGB16161616: 5583 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; 5584 break; 5585 case DRM_FORMAT_XBGR16161616: 5586 case DRM_FORMAT_ABGR16161616: 5587 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; 5588 break; 5589 default: 5590 DRM_ERROR( 5591 "Unsupported screen format %p4cc\n", 5592 &fb->format->format); 5593 return -EINVAL; 5594 } 5595 5596 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 5597 case DRM_MODE_ROTATE_0: 5598 plane_info->rotation = ROTATION_ANGLE_0; 5599 break; 5600 case DRM_MODE_ROTATE_90: 5601 plane_info->rotation = ROTATION_ANGLE_90; 5602 break; 5603 case DRM_MODE_ROTATE_180: 5604 plane_info->rotation = ROTATION_ANGLE_180; 5605 break; 5606 case DRM_MODE_ROTATE_270: 5607 plane_info->rotation = ROTATION_ANGLE_270; 5608 break; 5609 default: 5610 plane_info->rotation = ROTATION_ANGLE_0; 5611 break; 5612 } 5613 5614 5615 plane_info->visible = true; 5616 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; 5617 5618 plane_info->layer_index = plane_state->normalized_zpos; 5619 5620 ret = fill_plane_color_attributes(plane_state, plane_info->format, 5621 &plane_info->color_space); 5622 if (ret) 5623 return ret; 5624 5625 ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format, 5626 plane_info->rotation, tiling_flags, 5627 &plane_info->tiling_info, 5628 &plane_info->plane_size, 5629 &plane_info->dcc, address, 5630 tmz_surface); 5631 if (ret) 5632 return ret; 5633 5634 amdgpu_dm_plane_fill_blending_from_plane_state( 5635 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha, 5636 &plane_info->global_alpha, &plane_info->global_alpha_value); 5637 5638 return 0; 5639 } 5640 5641 static int fill_dc_plane_attributes(struct amdgpu_device *adev, 5642 struct dc_plane_state *dc_plane_state, 5643 struct drm_plane_state *plane_state, 5644 struct drm_crtc_state *crtc_state) 5645 { 5646 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 5647 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb; 5648 struct dc_scaling_info scaling_info; 5649 struct dc_plane_info plane_info; 5650 int ret; 5651 5652 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info); 5653 if (ret) 5654 return ret; 5655 5656 dc_plane_state->src_rect = scaling_info.src_rect; 5657 dc_plane_state->dst_rect = scaling_info.dst_rect; 5658 dc_plane_state->clip_rect = scaling_info.clip_rect; 5659 dc_plane_state->scaling_quality = scaling_info.scaling_quality; 5660 5661 ret = fill_dc_plane_info_and_addr(adev, plane_state, 5662 afb->tiling_flags, 5663 &plane_info, 5664 &dc_plane_state->address, 5665 afb->tmz_surface); 5666 if (ret) 5667 return ret; 5668 5669 dc_plane_state->format = plane_info.format; 5670 dc_plane_state->color_space = 
plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index;
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state,
						plane_state,
						dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static inline void fill_dc_dirty_rect(struct drm_plane *plane,
				      struct rect *dirty_rect, s32 x,
				      s32 y, s32 width, s32 height,
				      u32 *i, bool ffu)
{
	WARN_ON(*i >= DC_MAX_DIRTY_RECTS);

	dirty_rect->x = x;
	dirty_rect->y = y;
	dirty_rect->width = width;
	dirty_rect->height = height;

	if (ffu)
		drm_dbg(plane->dev,
			"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
			plane->base.id, width, height);
	else
		drm_dbg(plane->dev,
			"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
			plane->base.id, x, y, width, height);

	(*i)++;
}

/**
 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
 *
 * @plane: DRM plane containing dirty regions that need to be flushed to the
 *         eDP remote fb
 * @old_plane_state: Old state of @plane
 * @new_plane_state: New state of @plane
 * @crtc_state: New state of CRTC connected to the @plane
 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
 * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update
 *             (PSR SU) is enabled. If PSR SU is enabled and damage clips are
 *             available, only the regions of the screen that have changed
 *             will be updated. If PSR SU is not enabled, or if damage clips
 *             are not available, the entire screen will be updated.
 * @dirty_regions_changed: set when the set of dirty regions changed, i.e. the
 *                         plane's bounding box moved or resized
 *
 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
 * (referred to as "damage clips" in DRM nomenclature) that require updating on
 * the eDP remote buffer. The responsibility of specifying the dirty regions is
 * amdgpu_dm's.
 *
 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
 * plane with regions that require flushing to the eDP remote buffer. In
 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
 * implicitly provide damage clips without any client support via the plane
 * bounds.
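 *
 * As a purely illustrative userspace-side sketch (not part of this driver),
 * a damage-aware client using libdrm could attach a single damage rectangle
 * to an atomic commit roughly as follows, assuming fd, req, plane_id and
 * damage_clips_prop_id (the plane's FB_DAMAGE_CLIPS property id, discovered
 * via drmModeObjectGetProperties()) are already set up::
 *
 *     // One changed region, in framebuffer coordinates.
 *     struct drm_mode_rect clip = { .x1 = 64, .y1 = 64, .x2 = 192, .y2 = 128 };
 *     uint32_t blob_id = 0;
 *
 *     drmModeCreatePropertyBlob(fd, &clip, sizeof(clip), &blob_id);
 *     drmModeAtomicAddProperty(req, plane_id, damage_clips_prop_id, blob_id);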
 */
static void fill_dc_dirty_rects(struct drm_plane *plane,
				struct drm_plane_state *old_plane_state,
				struct drm_plane_state *new_plane_state,
				struct drm_crtc_state *crtc_state,
				struct dc_flip_addrs *flip_addrs,
				bool is_psr_su,
				bool *dirty_regions_changed)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct rect *dirty_rects = flip_addrs->dirty_rects;
	u32 num_clips;
	struct drm_mode_rect *clips;
	bool bb_changed;
	bool fb_changed;
	u32 i = 0;

	*dirty_regions_changed = false;

	/*
	 * Cursor plane has its own dirty rect update interface. See
	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return;

	if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
		goto ffu;

	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
	clips = drm_plane_get_damage_clips(new_plane_state);

	if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
						   is_psr_su)))
		goto ffu;

	if (!dm_crtc_state->mpo_requested) {
		if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
			goto ffu;

		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
			fill_dc_dirty_rect(new_plane_state->plane,
					   &dirty_rects[flip_addrs->dirty_rect_count],
					   clips->x1, clips->y1,
					   clips->x2 - clips->x1, clips->y2 - clips->y1,
					   &flip_addrs->dirty_rect_count,
					   false);
		return;
	}

	/*
	 * MPO is requested. Add entire plane bounding box to dirty rects if
	 * flipped to or damaged.
	 *
	 * If plane is moved or resized, also add old bounding box to dirty
	 * rects.
	 */
	fb_changed = old_plane_state->fb->base.id !=
		new_plane_state->fb->base.id;
	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
		      old_plane_state->crtc_h != new_plane_state->crtc_h);

	drm_dbg(plane->dev,
		"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
		new_plane_state->plane->base.id,
		bb_changed, fb_changed, num_clips);

	*dirty_regions_changed = bb_changed;

	if ((num_clips + (bb_changed ?
2 : 0)) > DC_MAX_DIRTY_RECTS) 5819 goto ffu; 5820 5821 if (bb_changed) { 5822 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5823 new_plane_state->crtc_x, 5824 new_plane_state->crtc_y, 5825 new_plane_state->crtc_w, 5826 new_plane_state->crtc_h, &i, false); 5827 5828 /* Add old plane bounding-box if plane is moved or resized */ 5829 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5830 old_plane_state->crtc_x, 5831 old_plane_state->crtc_y, 5832 old_plane_state->crtc_w, 5833 old_plane_state->crtc_h, &i, false); 5834 } 5835 5836 if (num_clips) { 5837 for (; i < num_clips; clips++) 5838 fill_dc_dirty_rect(new_plane_state->plane, 5839 &dirty_rects[i], clips->x1, 5840 clips->y1, clips->x2 - clips->x1, 5841 clips->y2 - clips->y1, &i, false); 5842 } else if (fb_changed && !bb_changed) { 5843 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5844 new_plane_state->crtc_x, 5845 new_plane_state->crtc_y, 5846 new_plane_state->crtc_w, 5847 new_plane_state->crtc_h, &i, false); 5848 } 5849 5850 flip_addrs->dirty_rect_count = i; 5851 return; 5852 5853 ffu: 5854 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0, 5855 dm_crtc_state->base.mode.crtc_hdisplay, 5856 dm_crtc_state->base.mode.crtc_vdisplay, 5857 &flip_addrs->dirty_rect_count, true); 5858 } 5859 5860 static void update_stream_scaling_settings(const struct drm_display_mode *mode, 5861 const struct dm_connector_state *dm_state, 5862 struct dc_stream_state *stream) 5863 { 5864 enum amdgpu_rmx_type rmx_type; 5865 5866 struct rect src = { 0 }; /* viewport in composition space*/ 5867 struct rect dst = { 0 }; /* stream addressable area */ 5868 5869 /* no mode. nothing to be done */ 5870 if (!mode) 5871 return; 5872 5873 /* Full screen scaling by default */ 5874 src.width = mode->hdisplay; 5875 src.height = mode->vdisplay; 5876 dst.width = stream->timing.h_addressable; 5877 dst.height = stream->timing.v_addressable; 5878 5879 if (dm_state) { 5880 rmx_type = dm_state->scaling; 5881 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { 5882 if (src.width * dst.height < 5883 src.height * dst.width) { 5884 /* height needs less upscaling/more downscaling */ 5885 dst.width = src.width * 5886 dst.height / src.height; 5887 } else { 5888 /* width needs less upscaling/more downscaling */ 5889 dst.height = src.height * 5890 dst.width / src.width; 5891 } 5892 } else if (rmx_type == RMX_CENTER) { 5893 dst = src; 5894 } 5895 5896 dst.x = (stream->timing.h_addressable - dst.width) / 2; 5897 dst.y = (stream->timing.v_addressable - dst.height) / 2; 5898 5899 if (dm_state->underscan_enable) { 5900 dst.x += dm_state->underscan_hborder / 2; 5901 dst.y += dm_state->underscan_vborder / 2; 5902 dst.width -= dm_state->underscan_hborder; 5903 dst.height -= dm_state->underscan_vborder; 5904 } 5905 } 5906 5907 stream->src = src; 5908 stream->dst = dst; 5909 5910 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n", 5911 dst.x, dst.y, dst.width, dst.height); 5912 5913 } 5914 5915 static enum dc_color_depth 5916 convert_color_depth_from_display_info(const struct drm_connector *connector, 5917 bool is_y420, int requested_bpc) 5918 { 5919 u8 bpc; 5920 5921 if (is_y420) { 5922 bpc = 8; 5923 5924 /* Cap display bpc based on HDMI 2.0 HF-VSDB */ 5925 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) 5926 bpc = 16; 5927 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) 5928 bpc = 12; 5929 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) 5930 bpc = 
10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
		       const struct drm_connector_state *connector_state)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (connector_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT601_YCC:
		if (dc_crtc_timing->flags.Y_ONLY)
			color_space = COLOR_SPACE_YCBCR601_LIMITED;
		else
			color_space = COLOR_SPACE_YCBCR601;
		break;
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		if (dc_crtc_timing->flags.Y_ONLY)
			color_space = COLOR_SPACE_YCBCR709_LIMITED;
		else
			color_space = COLOR_SPACE_YCBCR709;
		break;
	case DRM_MODE_COLORIMETRY_OPRGB:
		color_space = COLOR_SPACE_ADOBERGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
			color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
		else
			color_space = COLOR_SPACE_2020_YCBCR;
		break;
	case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
	default:
		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
			color_space = COLOR_SPACE_SRGB;
		/*
		 * According to the HDMI spec, 27.03 MHz (270300 in the 100 Hz
		 * units used here) is the separation point between HDTV and
		 * SDTV, so we use YCbCr709 above it and YCbCr601 below it.
		 */
		} else if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}
		break;
	}

	return color_space;
}

static enum display_content_type
get_output_content_type(const struct drm_connector_state *connector_state)
{
	switch (connector_state->content_type) {
	default:
	case DRM_MODE_CONTENT_TYPE_NO_DATA:
		return DISPLAY_CONTENT_TYPE_NO_DATA;
	case DRM_MODE_CONTENT_TYPE_GRAPHICS:
		return DISPLAY_CONTENT_TYPE_GRAPHICS;
	case
DRM_MODE_CONTENT_TYPE_PHOTO: 6051 return DISPLAY_CONTENT_TYPE_PHOTO; 6052 case DRM_MODE_CONTENT_TYPE_CINEMA: 6053 return DISPLAY_CONTENT_TYPE_CINEMA; 6054 case DRM_MODE_CONTENT_TYPE_GAME: 6055 return DISPLAY_CONTENT_TYPE_GAME; 6056 } 6057 } 6058 6059 static bool adjust_colour_depth_from_display_info( 6060 struct dc_crtc_timing *timing_out, 6061 const struct drm_display_info *info) 6062 { 6063 enum dc_color_depth depth = timing_out->display_color_depth; 6064 int normalized_clk; 6065 6066 do { 6067 normalized_clk = timing_out->pix_clk_100hz / 10; 6068 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ 6069 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) 6070 normalized_clk /= 2; 6071 /* Adjusting pix clock following on HDMI spec based on colour depth */ 6072 switch (depth) { 6073 case COLOR_DEPTH_888: 6074 break; 6075 case COLOR_DEPTH_101010: 6076 normalized_clk = (normalized_clk * 30) / 24; 6077 break; 6078 case COLOR_DEPTH_121212: 6079 normalized_clk = (normalized_clk * 36) / 24; 6080 break; 6081 case COLOR_DEPTH_161616: 6082 normalized_clk = (normalized_clk * 48) / 24; 6083 break; 6084 default: 6085 /* The above depths are the only ones valid for HDMI. */ 6086 return false; 6087 } 6088 if (normalized_clk <= info->max_tmds_clock) { 6089 timing_out->display_color_depth = depth; 6090 return true; 6091 } 6092 } while (--depth > COLOR_DEPTH_666); 6093 return false; 6094 } 6095 6096 static void fill_stream_properties_from_drm_display_mode( 6097 struct dc_stream_state *stream, 6098 const struct drm_display_mode *mode_in, 6099 const struct drm_connector *connector, 6100 const struct drm_connector_state *connector_state, 6101 const struct dc_stream_state *old_stream, 6102 int requested_bpc) 6103 { 6104 struct dc_crtc_timing *timing_out = &stream->timing; 6105 const struct drm_display_info *info = &connector->display_info; 6106 struct amdgpu_dm_connector *aconnector = NULL; 6107 struct hdmi_vendor_infoframe hv_frame; 6108 struct hdmi_avi_infoframe avi_frame; 6109 6110 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 6111 aconnector = to_amdgpu_dm_connector(connector); 6112 6113 memset(&hv_frame, 0, sizeof(hv_frame)); 6114 memset(&avi_frame, 0, sizeof(avi_frame)); 6115 6116 timing_out->h_border_left = 0; 6117 timing_out->h_border_right = 0; 6118 timing_out->v_border_top = 0; 6119 timing_out->v_border_bottom = 0; 6120 /* TODO: un-hardcode */ 6121 if (drm_mode_is_420_only(info, mode_in) 6122 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6123 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6124 else if (drm_mode_is_420_also(info, mode_in) 6125 && aconnector 6126 && aconnector->force_yuv420_output) 6127 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6128 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) 6129 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6130 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; 6131 else 6132 timing_out->pixel_encoding = PIXEL_ENCODING_RGB; 6133 6134 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; 6135 timing_out->display_color_depth = convert_color_depth_from_display_info( 6136 connector, 6137 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420), 6138 requested_bpc); 6139 timing_out->scan_type = SCANNING_TYPE_NODATA; 6140 timing_out->hdmi_vic = 0; 6141 6142 if (old_stream) { 6143 timing_out->vic = old_stream->timing.vic; 6144 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; 6145 timing_out->flags.VSYNC_POSITIVE_POLARITY = 
old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; 6146 } else { 6147 timing_out->vic = drm_match_cea_mode(mode_in); 6148 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) 6149 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; 6150 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) 6151 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; 6152 } 6153 6154 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 6155 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); 6156 timing_out->vic = avi_frame.video_code; 6157 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); 6158 timing_out->hdmi_vic = hv_frame.vic; 6159 } 6160 6161 if (aconnector && is_freesync_video_mode(mode_in, aconnector)) { 6162 timing_out->h_addressable = mode_in->hdisplay; 6163 timing_out->h_total = mode_in->htotal; 6164 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; 6165 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; 6166 timing_out->v_total = mode_in->vtotal; 6167 timing_out->v_addressable = mode_in->vdisplay; 6168 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; 6169 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; 6170 timing_out->pix_clk_100hz = mode_in->clock * 10; 6171 } else { 6172 timing_out->h_addressable = mode_in->crtc_hdisplay; 6173 timing_out->h_total = mode_in->crtc_htotal; 6174 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; 6175 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; 6176 timing_out->v_total = mode_in->crtc_vtotal; 6177 timing_out->v_addressable = mode_in->crtc_vdisplay; 6178 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 6179 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; 6180 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; 6181 } 6182 6183 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 6184 6185 stream->out_transfer_func.type = TF_TYPE_PREDEFINED; 6186 stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB; 6187 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 6188 if (!adjust_colour_depth_from_display_info(timing_out, info) && 6189 drm_mode_is_420_also(info, mode_in) && 6190 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { 6191 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6192 adjust_colour_depth_from_display_info(timing_out, info); 6193 } 6194 } 6195 6196 stream->output_color_space = get_output_color_space(timing_out, connector_state); 6197 stream->content_type = get_output_content_type(connector_state); 6198 } 6199 6200 static void fill_audio_info(struct audio_info *audio_info, 6201 const struct drm_connector *drm_connector, 6202 const struct dc_sink *dc_sink) 6203 { 6204 int i = 0; 6205 int cea_revision = 0; 6206 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; 6207 6208 audio_info->manufacture_id = edid_caps->manufacturer_id; 6209 audio_info->product_id = edid_caps->product_id; 6210 6211 cea_revision = drm_connector->display_info.cea_rev; 6212 6213 strscpy(audio_info->display_name, 6214 edid_caps->display_name, 6215 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 6216 6217 if (cea_revision >= 3) { 6218 audio_info->mode_count = edid_caps->audio_mode_count; 6219 6220 for (i = 0; i < audio_info->mode_count; ++i) { 6221 audio_info->modes[i].format_code = 6222 (enum audio_format_code) 6223 (edid_caps->audio_modes[i].format_code); 6224 audio_info->modes[i].channel_count = 6225 
edid_caps->audio_modes[i].channel_count; 6226 audio_info->modes[i].sample_rates.all = 6227 edid_caps->audio_modes[i].sample_rate; 6228 audio_info->modes[i].sample_size = 6229 edid_caps->audio_modes[i].sample_size; 6230 } 6231 } 6232 6233 audio_info->flags.all = edid_caps->speaker_flags; 6234 6235 /* TODO: We only check for the progressive mode, check for interlace mode too */ 6236 if (drm_connector->latency_present[0]) { 6237 audio_info->video_latency = drm_connector->video_latency[0]; 6238 audio_info->audio_latency = drm_connector->audio_latency[0]; 6239 } 6240 6241 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ 6242 6243 } 6244 6245 static void 6246 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, 6247 struct drm_display_mode *dst_mode) 6248 { 6249 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; 6250 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; 6251 dst_mode->crtc_clock = src_mode->crtc_clock; 6252 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; 6253 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; 6254 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; 6255 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; 6256 dst_mode->crtc_htotal = src_mode->crtc_htotal; 6257 dst_mode->crtc_hskew = src_mode->crtc_hskew; 6258 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; 6259 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end; 6260 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; 6261 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; 6262 dst_mode->crtc_vtotal = src_mode->crtc_vtotal; 6263 } 6264 6265 static void 6266 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, 6267 const struct drm_display_mode *native_mode, 6268 bool scale_enabled) 6269 { 6270 if (scale_enabled) { 6271 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 6272 } else if (native_mode->clock == drm_mode->clock && 6273 native_mode->htotal == drm_mode->htotal && 6274 native_mode->vtotal == drm_mode->vtotal) { 6275 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 6276 } else { 6277 /* no scaling nor amdgpu inserted, no need to patch */ 6278 } 6279 } 6280 6281 static struct dc_sink * 6282 create_fake_sink(struct dc_link *link) 6283 { 6284 struct dc_sink_init_data sink_init_data = { 0 }; 6285 struct dc_sink *sink = NULL; 6286 6287 sink_init_data.link = link; 6288 sink_init_data.sink_signal = link->connector_signal; 6289 6290 sink = dc_sink_create(&sink_init_data); 6291 if (!sink) { 6292 DRM_ERROR("Failed to create sink!\n"); 6293 return NULL; 6294 } 6295 sink->sink_signal = SIGNAL_TYPE_VIRTUAL; 6296 6297 return sink; 6298 } 6299 6300 static void set_multisync_trigger_params( 6301 struct dc_stream_state *stream) 6302 { 6303 struct dc_stream_state *master = NULL; 6304 6305 if (stream->triggered_crtc_reset.enabled) { 6306 master = stream->triggered_crtc_reset.event_source; 6307 stream->triggered_crtc_reset.event = 6308 master->timing.flags.VSYNC_POSITIVE_POLARITY ? 
CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}

/**
 * DOC: FreeSync Video
 *
 * When a userspace application wants to play a video, the content follows a
 * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
 *
 * - TV/NTSC (23.976 FPS)
 * - Cinema (24 FPS)
 * - TV/PAL (25 FPS)
 * - TV/NTSC (29.97 FPS)
 * - TV/NTSC (30 FPS)
 * - Cinema HFR (48 FPS)
 * - TV/PAL (50 FPS)
 * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96 FPS)
 *
 * The list of standard video formats is not huge and can be added to the
 * connector's mode list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate: since the refresh rate equals pixel clock / (htotal * vtotal),
 * stretching the vertical front porch (and thus vtotal) lowers the refresh
 * rate without touching the pixel clock or the active timing. Such a switch
 * happens seamlessly, without screen blanking or reprogramming of the output
 * in any other way. If userspace requests a mode set that is compatible with
 * the current FreeSync mode and differs only in refresh rate, DC skips the
 * full update and avoids any blink during the transition. For example, a
 * video player can switch from 60Hz to 30Hz for playing TV/NTSC content when
 * it goes full screen without causing any display blink.
 */
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			      bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
6403 &aconnector->base.probed_modes : 6404 &aconnector->base.modes; 6405 6406 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 6407 return NULL; 6408 6409 if (aconnector->freesync_vid_base.clock != 0) 6410 return &aconnector->freesync_vid_base; 6411 6412 /* Find the preferred mode */ 6413 list_for_each_entry(m, list_head, head) { 6414 if (m->type & DRM_MODE_TYPE_PREFERRED) { 6415 m_pref = m; 6416 break; 6417 } 6418 } 6419 6420 if (!m_pref) { 6421 /* Probably an EDID with no preferred mode. Fallback to first entry */ 6422 m_pref = list_first_entry_or_null( 6423 &aconnector->base.modes, struct drm_display_mode, head); 6424 if (!m_pref) { 6425 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); 6426 return NULL; 6427 } 6428 } 6429 6430 highest_refresh = drm_mode_vrefresh(m_pref); 6431 6432 /* 6433 * Find the mode with highest refresh rate with same resolution. 6434 * For some monitors, preferred mode is not the mode with highest 6435 * supported refresh rate. 6436 */ 6437 list_for_each_entry(m, list_head, head) { 6438 current_refresh = drm_mode_vrefresh(m); 6439 6440 if (m->hdisplay == m_pref->hdisplay && 6441 m->vdisplay == m_pref->vdisplay && 6442 highest_refresh < current_refresh) { 6443 highest_refresh = current_refresh; 6444 m_pref = m; 6445 } 6446 } 6447 6448 drm_mode_copy(&aconnector->freesync_vid_base, m_pref); 6449 return m_pref; 6450 } 6451 6452 static bool is_freesync_video_mode(const struct drm_display_mode *mode, 6453 struct amdgpu_dm_connector *aconnector) 6454 { 6455 struct drm_display_mode *high_mode; 6456 int timing_diff; 6457 6458 high_mode = get_highest_refresh_rate_mode(aconnector, false); 6459 if (!high_mode || !mode) 6460 return false; 6461 6462 timing_diff = high_mode->vtotal - mode->vtotal; 6463 6464 if (high_mode->clock == 0 || high_mode->clock != mode->clock || 6465 high_mode->hdisplay != mode->hdisplay || 6466 high_mode->vdisplay != mode->vdisplay || 6467 high_mode->hsync_start != mode->hsync_start || 6468 high_mode->hsync_end != mode->hsync_end || 6469 high_mode->htotal != mode->htotal || 6470 high_mode->hskew != mode->hskew || 6471 high_mode->vscan != mode->vscan || 6472 high_mode->vsync_start - mode->vsync_start != timing_diff || 6473 high_mode->vsync_end - mode->vsync_end != timing_diff) 6474 return false; 6475 else 6476 return true; 6477 } 6478 6479 #if defined(CONFIG_DRM_AMD_DC_FP) 6480 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, 6481 struct dc_sink *sink, struct dc_stream_state *stream, 6482 struct dsc_dec_dpcd_caps *dsc_caps) 6483 { 6484 stream->timing.flags.DSC = 0; 6485 dsc_caps->is_dsc_supported = false; 6486 6487 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || 6488 sink->sink_signal == SIGNAL_TYPE_EDP)) { 6489 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || 6490 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) 6491 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, 6492 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 6493 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, 6494 dsc_caps); 6495 } 6496 } 6497 6498 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, 6499 struct dc_sink *sink, struct dc_stream_state *stream, 6500 struct dsc_dec_dpcd_caps *dsc_caps, 6501 uint32_t max_dsc_target_bpp_limit_override) 6502 { 6503 const struct dc_link_settings *verified_link_cap = NULL; 6504 u32 link_bw_in_kbps; 6505 u32 edp_min_bpp_x16, edp_max_bpp_x16; 6506 struct dc *dc = sink->ctx->dc; 6507 
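	/*
	 * eDP policy below: bw_range captures the bandwidth required across
	 * the panel's allowed compressed bpp range. If the stream compressed
	 * at the maximum target bpp still fits within the verified link
	 * bandwidth, DSC is enabled at that maximum bpp; otherwise a DSC
	 * config targeted at the link bandwidth itself is computed instead.
	 */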
struct dc_dsc_bw_range bw_range = {0}; 6508 struct dc_dsc_config dsc_cfg = {0}; 6509 struct dc_dsc_config_options dsc_options = {0}; 6510 6511 dc_dsc_get_default_config_option(dc, &dsc_options); 6512 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 6513 6514 verified_link_cap = dc_link_get_link_cap(stream->link); 6515 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); 6516 edp_min_bpp_x16 = 8 * 16; 6517 edp_max_bpp_x16 = 8 * 16; 6518 6519 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) 6520 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; 6521 6522 if (edp_max_bpp_x16 < edp_min_bpp_x16) 6523 edp_min_bpp_x16 = edp_max_bpp_x16; 6524 6525 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], 6526 dc->debug.dsc_min_slice_height_override, 6527 edp_min_bpp_x16, edp_max_bpp_x16, 6528 dsc_caps, 6529 &stream->timing, 6530 dc_link_get_highest_encoding_format(aconnector->dc_link), 6531 &bw_range)) { 6532 6533 if (bw_range.max_kbps < link_bw_in_kbps) { 6534 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 6535 dsc_caps, 6536 &dsc_options, 6537 0, 6538 &stream->timing, 6539 dc_link_get_highest_encoding_format(aconnector->dc_link), 6540 &dsc_cfg)) { 6541 stream->timing.dsc_cfg = dsc_cfg; 6542 stream->timing.flags.DSC = 1; 6543 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; 6544 } 6545 return; 6546 } 6547 } 6548 6549 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 6550 dsc_caps, 6551 &dsc_options, 6552 link_bw_in_kbps, 6553 &stream->timing, 6554 dc_link_get_highest_encoding_format(aconnector->dc_link), 6555 &dsc_cfg)) { 6556 stream->timing.dsc_cfg = dsc_cfg; 6557 stream->timing.flags.DSC = 1; 6558 } 6559 } 6560 6561 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, 6562 struct dc_sink *sink, struct dc_stream_state *stream, 6563 struct dsc_dec_dpcd_caps *dsc_caps) 6564 { 6565 struct drm_connector *drm_connector = &aconnector->base; 6566 u32 link_bandwidth_kbps; 6567 struct dc *dc = sink->ctx->dc; 6568 u32 max_supported_bw_in_kbps, timing_bw_in_kbps; 6569 u32 dsc_max_supported_bw_in_kbps; 6570 u32 max_dsc_target_bpp_limit_override = 6571 drm_connector->display_info.max_dsc_bpp; 6572 struct dc_dsc_config_options dsc_options = {0}; 6573 6574 dc_dsc_get_default_config_option(dc, &dsc_options); 6575 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 6576 6577 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, 6578 dc_link_get_link_cap(aconnector->dc_link)); 6579 6580 /* Set DSC policy according to dsc_clock_en */ 6581 dc_dsc_policy_set_enable_dsc_when_not_needed( 6582 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); 6583 6584 if (sink->sink_signal == SIGNAL_TYPE_EDP && 6585 !aconnector->dc_link->panel_config.dsc.disable_dsc_edp && 6586 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { 6587 6588 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); 6589 6590 } else if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { 6591 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { 6592 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 6593 dsc_caps, 6594 &dsc_options, 6595 link_bandwidth_kbps, 6596 &stream->timing, 6597 dc_link_get_highest_encoding_format(aconnector->dc_link), 6598 &stream->timing.dsc_cfg)) { 6599 stream->timing.flags.DSC = 1; 6600 DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST 
RX\n", 6601 __func__, drm_connector->name); 6602 } 6603 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { 6604 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, 6605 dc_link_get_highest_encoding_format(aconnector->dc_link)); 6606 max_supported_bw_in_kbps = link_bandwidth_kbps; 6607 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; 6608 6609 if (timing_bw_in_kbps > max_supported_bw_in_kbps && 6610 max_supported_bw_in_kbps > 0 && 6611 dsc_max_supported_bw_in_kbps > 0) 6612 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 6613 dsc_caps, 6614 &dsc_options, 6615 dsc_max_supported_bw_in_kbps, 6616 &stream->timing, 6617 dc_link_get_highest_encoding_format(aconnector->dc_link), 6618 &stream->timing.dsc_cfg)) { 6619 stream->timing.flags.DSC = 1; 6620 DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n", 6621 __func__, drm_connector->name); 6622 } 6623 } 6624 } 6625 6626 /* Overwrite the stream flag if DSC is enabled through debugfs */ 6627 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) 6628 stream->timing.flags.DSC = 1; 6629 6630 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h) 6631 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; 6632 6633 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) 6634 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; 6635 6636 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) 6637 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; 6638 } 6639 #endif 6640 6641 static struct dc_stream_state * 6642 create_stream_for_sink(struct drm_connector *connector, 6643 const struct drm_display_mode *drm_mode, 6644 const struct dm_connector_state *dm_state, 6645 const struct dc_stream_state *old_stream, 6646 int requested_bpc) 6647 { 6648 struct amdgpu_dm_connector *aconnector = NULL; 6649 struct drm_display_mode *preferred_mode = NULL; 6650 const struct drm_connector_state *con_state = &dm_state->base; 6651 struct dc_stream_state *stream = NULL; 6652 struct drm_display_mode mode; 6653 struct drm_display_mode saved_mode; 6654 struct drm_display_mode *freesync_mode = NULL; 6655 bool native_mode_found = false; 6656 bool recalculate_timing = false; 6657 bool scale = dm_state->scaling != RMX_OFF; 6658 int mode_refresh; 6659 int preferred_refresh = 0; 6660 enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; 6661 #if defined(CONFIG_DRM_AMD_DC_FP) 6662 struct dsc_dec_dpcd_caps dsc_caps; 6663 #endif 6664 struct dc_link *link = NULL; 6665 struct dc_sink *sink = NULL; 6666 6667 drm_mode_init(&mode, drm_mode); 6668 memset(&saved_mode, 0, sizeof(saved_mode)); 6669 6670 if (connector == NULL) { 6671 DRM_ERROR("connector is NULL!\n"); 6672 return stream; 6673 } 6674 6675 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) { 6676 aconnector = NULL; 6677 aconnector = to_amdgpu_dm_connector(connector); 6678 link = aconnector->dc_link; 6679 } else { 6680 struct drm_writeback_connector *wbcon = NULL; 6681 struct amdgpu_dm_wb_connector *dm_wbcon = NULL; 6682 6683 wbcon = drm_connector_to_writeback(connector); 6684 dm_wbcon = to_amdgpu_dm_wb_connector(wbcon); 6685 link = dm_wbcon->link; 6686 } 6687 6688 if (!aconnector || !aconnector->dc_sink) { 6689 sink = create_fake_sink(link); 6690 if (!sink) 6691 return stream; 6692 6693 } else { 6694 sink = aconnector->dc_sink; 6695 dc_sink_retain(sink); 6696 } 
6697 6698 stream = dc_create_stream_for_sink(sink); 6699 6700 if (stream == NULL) { 6701 DRM_ERROR("Failed to create stream for sink!\n"); 6702 goto finish; 6703 } 6704 6705 /* We leave this NULL for writeback connectors */ 6706 stream->dm_stream_context = aconnector; 6707 6708 stream->timing.flags.LTE_340MCSC_SCRAMBLE = 6709 connector->display_info.hdmi.scdc.scrambling.low_rates; 6710 6711 list_for_each_entry(preferred_mode, &connector->modes, head) { 6712 /* Search for preferred mode */ 6713 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { 6714 native_mode_found = true; 6715 break; 6716 } 6717 } 6718 if (!native_mode_found) 6719 preferred_mode = list_first_entry_or_null( 6720 &connector->modes, 6721 struct drm_display_mode, 6722 head); 6723 6724 mode_refresh = drm_mode_vrefresh(&mode); 6725 6726 if (preferred_mode == NULL) { 6727 /* 6728 * This may not be an error, the use case is when we have no 6729 * usermode calls to reset and set mode upon hotplug. In this 6730 * case, we call set mode ourselves to restore the previous mode 6731 * and the modelist may not be filled in time. 6732 */ 6733 DRM_DEBUG_DRIVER("No preferred mode found\n"); 6734 } else if (aconnector) { 6735 recalculate_timing = amdgpu_freesync_vid_mode && 6736 is_freesync_video_mode(&mode, aconnector); 6737 if (recalculate_timing) { 6738 freesync_mode = get_highest_refresh_rate_mode(aconnector, false); 6739 drm_mode_copy(&saved_mode, &mode); 6740 saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio; 6741 drm_mode_copy(&mode, freesync_mode); 6742 mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio; 6743 } else { 6744 decide_crtc_timing_for_drm_display_mode( 6745 &mode, preferred_mode, scale); 6746 6747 preferred_refresh = drm_mode_vrefresh(preferred_mode); 6748 } 6749 } 6750 6751 if (recalculate_timing) 6752 drm_mode_set_crtcinfo(&saved_mode, 0); 6753 6754 /* 6755 * If scaling is enabled and refresh rate didn't change 6756 * we copy the vic and polarities of the old timings 6757 */ 6758 if (!scale || mode_refresh != preferred_refresh) 6759 fill_stream_properties_from_drm_display_mode( 6760 stream, &mode, connector, con_state, NULL, 6761 requested_bpc); 6762 else 6763 fill_stream_properties_from_drm_display_mode( 6764 stream, &mode, connector, con_state, old_stream, 6765 requested_bpc); 6766 6767 /* The rest isn't needed for writeback connectors */ 6768 if (!aconnector) 6769 goto finish; 6770 6771 if (aconnector->timing_changed) { 6772 drm_dbg(aconnector->base.dev, 6773 "overriding timing for automated test, bpc %d, changing to %d\n", 6774 stream->timing.display_color_depth, 6775 aconnector->timing_requested->display_color_depth); 6776 stream->timing = *aconnector->timing_requested; 6777 } 6778 6779 #if defined(CONFIG_DRM_AMD_DC_FP) 6780 /* SST DSC determination policy */ 6781 update_dsc_caps(aconnector, sink, stream, &dsc_caps); 6782 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) 6783 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps); 6784 #endif 6785 6786 update_stream_scaling_settings(&mode, dm_state, stream); 6787 6788 fill_audio_info( 6789 &stream->audio_info, 6790 connector, 6791 sink); 6792 6793 update_stream_signal(stream, sink); 6794 6795 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6796 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); 6797 6798 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || 6799 stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || 6800 stream->signal == SIGNAL_TYPE_EDP) { 6801 const struct 
dc_edid_caps *edid_caps; 6802 unsigned int disable_colorimetry = 0; 6803 6804 if (aconnector->dc_sink) { 6805 edid_caps = &aconnector->dc_sink->edid_caps; 6806 disable_colorimetry = edid_caps->panel_patch.disable_colorimetry; 6807 } 6808 6809 // 6810 // should decide stream support vsc sdp colorimetry capability 6811 // before building vsc info packet 6812 // 6813 stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && 6814 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && 6815 !disable_colorimetry; 6816 6817 if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) 6818 tf = TRANSFER_FUNC_GAMMA_22; 6819 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); 6820 aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; 6821 6822 } 6823 finish: 6824 dc_sink_release(sink); 6825 6826 return stream; 6827 } 6828 6829 static enum drm_connector_status 6830 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) 6831 { 6832 bool connected; 6833 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6834 6835 /* 6836 * Notes: 6837 * 1. This interface is NOT called in context of HPD irq. 6838 * 2. This interface *is called* in context of user-mode ioctl. Which 6839 * makes it a bad place for *any* MST-related activity. 6840 */ 6841 6842 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && 6843 !aconnector->fake_enable) 6844 connected = (aconnector->dc_sink != NULL); 6845 else 6846 connected = (aconnector->base.force == DRM_FORCE_ON || 6847 aconnector->base.force == DRM_FORCE_ON_DIGITAL); 6848 6849 update_subconnector_property(aconnector); 6850 6851 return (connected ? connector_status_connected : 6852 connector_status_disconnected); 6853 } 6854 6855 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, 6856 struct drm_connector_state *connector_state, 6857 struct drm_property *property, 6858 uint64_t val) 6859 { 6860 struct drm_device *dev = connector->dev; 6861 struct amdgpu_device *adev = drm_to_adev(dev); 6862 struct dm_connector_state *dm_old_state = 6863 to_dm_connector_state(connector->state); 6864 struct dm_connector_state *dm_new_state = 6865 to_dm_connector_state(connector_state); 6866 6867 int ret = -EINVAL; 6868 6869 if (property == dev->mode_config.scaling_mode_property) { 6870 enum amdgpu_rmx_type rmx_type; 6871 6872 switch (val) { 6873 case DRM_MODE_SCALE_CENTER: 6874 rmx_type = RMX_CENTER; 6875 break; 6876 case DRM_MODE_SCALE_ASPECT: 6877 rmx_type = RMX_ASPECT; 6878 break; 6879 case DRM_MODE_SCALE_FULLSCREEN: 6880 rmx_type = RMX_FULL; 6881 break; 6882 case DRM_MODE_SCALE_NONE: 6883 default: 6884 rmx_type = RMX_OFF; 6885 break; 6886 } 6887 6888 if (dm_old_state->scaling == rmx_type) 6889 return 0; 6890 6891 dm_new_state->scaling = rmx_type; 6892 ret = 0; 6893 } else if (property == adev->mode_info.underscan_hborder_property) { 6894 dm_new_state->underscan_hborder = val; 6895 ret = 0; 6896 } else if (property == adev->mode_info.underscan_vborder_property) { 6897 dm_new_state->underscan_vborder = val; 6898 ret = 0; 6899 } else if (property == adev->mode_info.underscan_property) { 6900 dm_new_state->underscan_enable = val; 6901 ret = 0; 6902 } 6903 6904 return ret; 6905 } 6906 6907 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, 6908 const struct drm_connector_state *state, 6909 struct drm_property *property, 6910 uint64_t *val) 6911 { 6912 struct drm_device *dev = connector->dev; 6913 struct amdgpu_device 
*adev = drm_to_adev(dev); 6914 struct dm_connector_state *dm_state = 6915 to_dm_connector_state(state); 6916 int ret = -EINVAL; 6917 6918 if (property == dev->mode_config.scaling_mode_property) { 6919 switch (dm_state->scaling) { 6920 case RMX_CENTER: 6921 *val = DRM_MODE_SCALE_CENTER; 6922 break; 6923 case RMX_ASPECT: 6924 *val = DRM_MODE_SCALE_ASPECT; 6925 break; 6926 case RMX_FULL: 6927 *val = DRM_MODE_SCALE_FULLSCREEN; 6928 break; 6929 case RMX_OFF: 6930 default: 6931 *val = DRM_MODE_SCALE_NONE; 6932 break; 6933 } 6934 ret = 0; 6935 } else if (property == adev->mode_info.underscan_hborder_property) { 6936 *val = dm_state->underscan_hborder; 6937 ret = 0; 6938 } else if (property == adev->mode_info.underscan_vborder_property) { 6939 *val = dm_state->underscan_vborder; 6940 ret = 0; 6941 } else if (property == adev->mode_info.underscan_property) { 6942 *val = dm_state->underscan_enable; 6943 ret = 0; 6944 } 6945 6946 return ret; 6947 } 6948 6949 /** 6950 * DOC: panel power savings 6951 * 6952 * The display manager allows you to set your desired **panel power savings** 6953 * level (between 0-4, with 0 representing off), e.g. using the following:: 6954 * 6955 * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings 6956 * 6957 * Modifying this value can have implications on color accuracy, so tread 6958 * carefully. 6959 */ 6960 6961 static ssize_t panel_power_savings_show(struct device *device, 6962 struct device_attribute *attr, 6963 char *buf) 6964 { 6965 struct drm_connector *connector = dev_get_drvdata(device); 6966 struct drm_device *dev = connector->dev; 6967 u8 val; 6968 6969 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 6970 val = to_dm_connector_state(connector->state)->abm_level == 6971 ABM_LEVEL_IMMEDIATE_DISABLE ? 
0 : 6972 to_dm_connector_state(connector->state)->abm_level; 6973 drm_modeset_unlock(&dev->mode_config.connection_mutex); 6974 6975 return sysfs_emit(buf, "%u\n", val); 6976 } 6977 6978 static ssize_t panel_power_savings_store(struct device *device, 6979 struct device_attribute *attr, 6980 const char *buf, size_t count) 6981 { 6982 struct drm_connector *connector = dev_get_drvdata(device); 6983 struct drm_device *dev = connector->dev; 6984 long val; 6985 int ret; 6986 6987 ret = kstrtol(buf, 0, &val); 6988 6989 if (ret) 6990 return ret; 6991 6992 if (val < 0 || val > 4) 6993 return -EINVAL; 6994 6995 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 6996 to_dm_connector_state(connector->state)->abm_level = val ?: 6997 ABM_LEVEL_IMMEDIATE_DISABLE; 6998 drm_modeset_unlock(&dev->mode_config.connection_mutex); 6999 7000 drm_kms_helper_hotplug_event(dev); 7001 7002 return count; 7003 } 7004 7005 static DEVICE_ATTR_RW(panel_power_savings); 7006 7007 static struct attribute *amdgpu_attrs[] = { 7008 &dev_attr_panel_power_savings.attr, 7009 NULL 7010 }; 7011 7012 static const struct attribute_group amdgpu_group = { 7013 .name = "amdgpu", 7014 .attrs = amdgpu_attrs 7015 }; 7016 7017 static bool 7018 amdgpu_dm_should_create_sysfs(struct amdgpu_dm_connector *amdgpu_dm_connector) 7019 { 7020 if (amdgpu_dm_abm_level >= 0) 7021 return false; 7022 7023 if (amdgpu_dm_connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) 7024 return false; 7025 7026 /* check for OLED panels */ 7027 if (amdgpu_dm_connector->bl_idx >= 0) { 7028 struct drm_device *drm = amdgpu_dm_connector->base.dev; 7029 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; 7030 struct amdgpu_dm_backlight_caps *caps; 7031 7032 caps = &dm->backlight_caps[amdgpu_dm_connector->bl_idx]; 7033 if (caps->aux_support) 7034 return false; 7035 } 7036 7037 return true; 7038 } 7039 7040 static void amdgpu_dm_connector_unregister(struct drm_connector *connector) 7041 { 7042 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 7043 7044 if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) 7045 sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group); 7046 7047 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); 7048 } 7049 7050 static void amdgpu_dm_connector_destroy(struct drm_connector *connector) 7051 { 7052 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7053 struct amdgpu_device *adev = drm_to_adev(connector->dev); 7054 struct amdgpu_display_manager *dm = &adev->dm; 7055 7056 /* 7057 * Call only if mst_mgr was initialized before since it's not done 7058 * for all connector types. 
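 * (In practice that means DisplayPort connectors, which set the MST
 * topology manager up during connector initialization.)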
7059 */ 7060 if (aconnector->mst_mgr.dev) 7061 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); 7062 7063 if (aconnector->bl_idx != -1) { 7064 backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]); 7065 dm->backlight_dev[aconnector->bl_idx] = NULL; 7066 } 7067 7068 if (aconnector->dc_em_sink) 7069 dc_sink_release(aconnector->dc_em_sink); 7070 aconnector->dc_em_sink = NULL; 7071 if (aconnector->dc_sink) 7072 dc_sink_release(aconnector->dc_sink); 7073 aconnector->dc_sink = NULL; 7074 7075 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 7076 drm_connector_unregister(connector); 7077 drm_connector_cleanup(connector); 7078 if (aconnector->i2c) { 7079 i2c_del_adapter(&aconnector->i2c->base); 7080 kfree(aconnector->i2c); 7081 } 7082 kfree(aconnector->dm_dp_aux.aux.name); 7083 7084 kfree(connector); 7085 } 7086 7087 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) 7088 { 7089 struct dm_connector_state *state = 7090 to_dm_connector_state(connector->state); 7091 7092 if (connector->state) 7093 __drm_atomic_helper_connector_destroy_state(connector->state); 7094 7095 kfree(state); 7096 7097 state = kzalloc(sizeof(*state), GFP_KERNEL); 7098 7099 if (state) { 7100 state->scaling = RMX_OFF; 7101 state->underscan_enable = false; 7102 state->underscan_hborder = 0; 7103 state->underscan_vborder = 0; 7104 state->base.max_requested_bpc = 8; 7105 state->vcpi_slots = 0; 7106 state->pbn = 0; 7107 7108 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 7109 if (amdgpu_dm_abm_level <= 0) 7110 state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE; 7111 else 7112 state->abm_level = amdgpu_dm_abm_level; 7113 } 7114 7115 __drm_atomic_helper_connector_reset(connector, &state->base); 7116 } 7117 } 7118 7119 struct drm_connector_state * 7120 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) 7121 { 7122 struct dm_connector_state *state = 7123 to_dm_connector_state(connector->state); 7124 7125 struct dm_connector_state *new_state = 7126 kmemdup(state, sizeof(*state), GFP_KERNEL); 7127 7128 if (!new_state) 7129 return NULL; 7130 7131 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); 7132 7133 new_state->freesync_capable = state->freesync_capable; 7134 new_state->abm_level = state->abm_level; 7135 new_state->scaling = state->scaling; 7136 new_state->underscan_enable = state->underscan_enable; 7137 new_state->underscan_hborder = state->underscan_hborder; 7138 new_state->underscan_vborder = state->underscan_vborder; 7139 new_state->vcpi_slots = state->vcpi_slots; 7140 new_state->pbn = state->pbn; 7141 return &new_state->base; 7142 } 7143 7144 static int 7145 amdgpu_dm_connector_late_register(struct drm_connector *connector) 7146 { 7147 struct amdgpu_dm_connector *amdgpu_dm_connector = 7148 to_amdgpu_dm_connector(connector); 7149 int r; 7150 7151 if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) { 7152 r = sysfs_create_group(&connector->kdev->kobj, 7153 &amdgpu_group); 7154 if (r) 7155 return r; 7156 } 7157 7158 amdgpu_dm_register_backlight_device(amdgpu_dm_connector); 7159 7160 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 7161 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 7162 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; 7163 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); 7164 if (r) 7165 return r; 7166 } 7167 7168 #if defined(CONFIG_DEBUG_FS) 7169 connector_debugfs_init(amdgpu_dm_connector); 7170 #endif 7171 7172 return 0; 7173 } 7174 7175 static void 
amdgpu_dm_connector_funcs_force(struct drm_connector *connector) 7176 { 7177 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7178 struct dc_link *dc_link = aconnector->dc_link; 7179 struct dc_sink *dc_em_sink = aconnector->dc_em_sink; 7180 const struct drm_edid *drm_edid; 7181 7182 drm_edid = drm_edid_read(connector); 7183 drm_edid_connector_update(connector, drm_edid); 7184 if (!drm_edid) { 7185 DRM_ERROR("No EDID found on connector: %s.\n", connector->name); 7186 return; 7187 } 7188 7189 aconnector->drm_edid = drm_edid; 7190 /* Update emulated (virtual) sink's EDID */ 7191 if (dc_em_sink && dc_link) { 7192 // FIXME: Get rid of drm_edid_raw() 7193 const struct edid *edid = drm_edid_raw(drm_edid); 7194 7195 memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); 7196 memmove(dc_em_sink->dc_edid.raw_edid, edid, 7197 (edid->extensions + 1) * EDID_LENGTH); 7198 dm_helpers_parse_edid_caps( 7199 dc_link, 7200 &dc_em_sink->dc_edid, 7201 &dc_em_sink->edid_caps); 7202 } 7203 } 7204 7205 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { 7206 .reset = amdgpu_dm_connector_funcs_reset, 7207 .detect = amdgpu_dm_connector_detect, 7208 .fill_modes = drm_helper_probe_single_connector_modes, 7209 .destroy = amdgpu_dm_connector_destroy, 7210 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 7211 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 7212 .atomic_set_property = amdgpu_dm_connector_atomic_set_property, 7213 .atomic_get_property = amdgpu_dm_connector_atomic_get_property, 7214 .late_register = amdgpu_dm_connector_late_register, 7215 .early_unregister = amdgpu_dm_connector_unregister, 7216 .force = amdgpu_dm_connector_funcs_force 7217 }; 7218 7219 static int get_modes(struct drm_connector *connector) 7220 { 7221 return amdgpu_dm_connector_get_modes(connector); 7222 } 7223 7224 static void create_eml_sink(struct amdgpu_dm_connector *aconnector) 7225 { 7226 struct drm_connector *connector = &aconnector->base; 7227 struct dc_sink_init_data init_params = { 7228 .link = aconnector->dc_link, 7229 .sink_signal = SIGNAL_TYPE_VIRTUAL 7230 }; 7231 const struct drm_edid *drm_edid; 7232 const struct edid *edid; 7233 7234 drm_edid = drm_edid_read(connector); 7235 drm_edid_connector_update(connector, drm_edid); 7236 if (!drm_edid) { 7237 DRM_ERROR("No EDID found on connector: %s.\n", connector->name); 7238 return; 7239 } 7240 7241 if (connector->display_info.is_hdmi) 7242 init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; 7243 7244 aconnector->drm_edid = drm_edid; 7245 7246 edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() 7247 aconnector->dc_em_sink = dc_link_add_remote_sink( 7248 aconnector->dc_link, 7249 (uint8_t *)edid, 7250 (edid->extensions + 1) * EDID_LENGTH, 7251 &init_params); 7252 7253 if (aconnector->base.force == DRM_FORCE_ON) { 7254 aconnector->dc_sink = aconnector->dc_link->local_sink ? 
				       aconnector->dc_link->local_sink :
				       aconnector->dc_em_sink;
		if (aconnector->dc_sink)
			dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of a headless boot with force enabled on a DP managed
	 * connector, these settings have to be != 0 to get an initial modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	create_eml_sink(aconnector);
}

static enum dc_status dm_validate_stream_and_context(struct dc *dc,
						     struct dc_stream_state *stream)
{
	enum dc_status dc_result = DC_ERROR_UNEXPECTED;
	struct dc_plane_state *dc_plane_state = NULL;
	struct dc_state *dc_state = NULL;

	if (!stream)
		goto cleanup;

	dc_plane_state = dc_create_plane_state(dc);
	if (!dc_plane_state)
		goto cleanup;

	dc_state = dc_state_create(dc, NULL);
	if (!dc_state)
		goto cleanup;

	/* populate stream to plane */
	dc_plane_state->src_rect.height = stream->src.height;
	dc_plane_state->src_rect.width = stream->src.width;
	dc_plane_state->dst_rect.height = stream->src.height;
	dc_plane_state->dst_rect.width = stream->src.width;
	dc_plane_state->clip_rect.height = stream->src.height;
	dc_plane_state->clip_rect.width = stream->src.width;
	dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
	dc_plane_state->plane_size.surface_size.height = stream->src.height;
	dc_plane_state->plane_size.surface_size.width = stream->src.width;
	dc_plane_state->plane_size.chroma_size.height = stream->src.height;
	dc_plane_state->plane_size.chroma_size.width = stream->src.width;
	dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
	dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
	dc_plane_state->rotation = ROTATION_ANGLE_0;
	dc_plane_state->is_tiling_rotated = false;
	dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;

	dc_result = dc_validate_stream(dc, stream);
	if (dc_result == DC_OK)
		dc_result = dc_validate_plane(dc, dc_plane_state);

	if (dc_result == DC_OK)
		dc_result = dc_state_add_stream(dc, dc_state, stream);

	if (dc_result == DC_OK && !dc_state_add_plane(
						dc,
						stream,
						dc_plane_state,
						dc_state))
		dc_result = DC_FAIL_ATTACH_SURFACES;

	if (dc_result == DC_OK)
		dc_result = dc_validate_global_state(dc, dc_state, true);

cleanup:
	if (dc_state)
		dc_state_release(dc_state);

	if (dc_plane_state)
		dc_plane_state_release(dc_plane_state);

	return dc_result;
}

struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;
	uint8_t bpc_limit = 6;

	if (!dm_state)
		return NULL;

	if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
	    aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
		bpc_limit = 8;

	do {
		stream = create_stream_for_sink(connector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			return stream;

		dc_result = dc_validate_stream(adev->dm.dc, stream);
		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);

		if (dc_result == DC_OK)
			dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_pixel_encoding_to_str(stream->timing.pixel_encoding),
				      dc_color_depth_to_str(stream->timing.display_color_depth),
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= bpc_limit);

	if ((dc_result == DC_FAIL_ENC_VALIDATE ||
	     dc_result == DC_EXCEED_DONGLE_CAP) &&
	    !aconnector->force_yuv420_output) {
		DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
			      __func__, __LINE__);

		aconnector->force_yuv420_output = true;
		stream = create_validate_stream_for_sink(aconnector, drm_mode,
							 dm_state, old_stream);
		aconnector->force_yuv420_output = false;
	}

	return stream;
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
	    aconnector->base.force != DRM_FORCE_ON) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	drm_mode_set_crtcinfo(mode, 0);

	stream = create_validate_stream_for_sink(aconnector, mode,
						 to_dm_connector_state(connector->state),
						 NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}

static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));
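
	/*
	 * What follows packs the CTA-861 Dynamic Range and Mastering (DRM)
	 * infoframe (type 0x87, version 0x01, payload length 26) into a DC
	 * info packet. For HDMI the 4-byte header plus a checksum byte
	 * precede the 26-byte payload (30 bytes total); for DP the same
	 * payload travels in an SDP, so the header bytes are laid out
	 * differently below.
	 */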
7465 7466 if (!state->hdr_output_metadata) 7467 return 0; 7468 7469 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state); 7470 if (ret) 7471 return ret; 7472 7473 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf)); 7474 if (len < 0) 7475 return (int)len; 7476 7477 /* Static metadata is a fixed 26 bytes + 4 byte header. */ 7478 if (len != 30) 7479 return -EINVAL; 7480 7481 /* Prepare the infopacket for DC. */ 7482 switch (state->connector->connector_type) { 7483 case DRM_MODE_CONNECTOR_HDMIA: 7484 out->hb0 = 0x87; /* type */ 7485 out->hb1 = 0x01; /* version */ 7486 out->hb2 = 0x1A; /* length */ 7487 out->sb[0] = buf[3]; /* checksum */ 7488 i = 1; 7489 break; 7490 7491 case DRM_MODE_CONNECTOR_DisplayPort: 7492 case DRM_MODE_CONNECTOR_eDP: 7493 out->hb0 = 0x00; /* sdp id, zero */ 7494 out->hb1 = 0x87; /* type */ 7495 out->hb2 = 0x1D; /* payload len - 1 */ 7496 out->hb3 = (0x13 << 2); /* sdp version */ 7497 out->sb[0] = 0x01; /* version */ 7498 out->sb[1] = 0x1A; /* length */ 7499 i = 2; 7500 break; 7501 7502 default: 7503 return -EINVAL; 7504 } 7505 7506 memcpy(&out->sb[i], &buf[4], 26); 7507 out->valid = true; 7508 7509 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, 7510 sizeof(out->sb), false); 7511 7512 return 0; 7513 } 7514 7515 static int 7516 amdgpu_dm_connector_atomic_check(struct drm_connector *conn, 7517 struct drm_atomic_state *state) 7518 { 7519 struct drm_connector_state *new_con_state = 7520 drm_atomic_get_new_connector_state(state, conn); 7521 struct drm_connector_state *old_con_state = 7522 drm_atomic_get_old_connector_state(state, conn); 7523 struct drm_crtc *crtc = new_con_state->crtc; 7524 struct drm_crtc_state *new_crtc_state; 7525 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); 7526 int ret; 7527 7528 trace_amdgpu_dm_connector_atomic_check(new_con_state); 7529 7530 if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 7531 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr); 7532 if (ret < 0) 7533 return ret; 7534 } 7535 7536 if (!crtc) 7537 return 0; 7538 7539 if (new_con_state->colorspace != old_con_state->colorspace) { 7540 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7541 if (IS_ERR(new_crtc_state)) 7542 return PTR_ERR(new_crtc_state); 7543 7544 new_crtc_state->mode_changed = true; 7545 } 7546 7547 if (new_con_state->content_type != old_con_state->content_type) { 7548 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7549 if (IS_ERR(new_crtc_state)) 7550 return PTR_ERR(new_crtc_state); 7551 7552 new_crtc_state->mode_changed = true; 7553 } 7554 7555 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { 7556 struct dc_info_packet hdr_infopacket; 7557 7558 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); 7559 if (ret) 7560 return ret; 7561 7562 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7563 if (IS_ERR(new_crtc_state)) 7564 return PTR_ERR(new_crtc_state); 7565 7566 /* 7567 * DC considers the stream backends changed if the 7568 * static metadata changes. Forcing the modeset also 7569 * gives a simple way for userspace to switch from 7570 * 8bpc to 10bpc when setting the metadata to enter 7571 * or exit HDR. 7572 * 7573 * Changing the static metadata after it's been 7574 * set is permissible, however. So only force a 7575 * modeset if we're entering or exiting HDR. 
		 */
		new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
					       !old_con_state->hdr_output_metadata ||
					       !new_con_state->hdr_output_metadata;
	}

	return 0;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If a second, larger display is hotplugged while fbcon is active,
	 * the larger modes are filtered out by drm_mode_validate_size() and
	 * end up missing once the user starts lightdm. The probed mode list
	 * therefore has to be rebuilt in the get_modes callback rather than
	 * just returning the mode count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}

int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}

static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	struct drm_dp_mst_topology_state *mst_state;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->mst_output_port)
		return 0;

	mst_port = aconnector->mst_output_port;
	mst_mgr = &aconnector->mst_root->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		/* drm_dp_calc_pbn_mode() takes bpp in 1/16 bpp units, hence the << 4 */
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
	}

	dm_new_connector_state->vcpi_slots =
		drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
					      dm_new_connector_state->pbn);
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check =
dm_encoder_helper_atomic_check 7681 }; 7682 7683 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, 7684 struct dc_state *dc_state, 7685 struct dsc_mst_fairness_vars *vars) 7686 { 7687 struct dc_stream_state *stream = NULL; 7688 struct drm_connector *connector; 7689 struct drm_connector_state *new_con_state; 7690 struct amdgpu_dm_connector *aconnector; 7691 struct dm_connector_state *dm_conn_state; 7692 int i, j, ret; 7693 int vcpi, pbn_div, pbn = 0, slot_num = 0; 7694 7695 for_each_new_connector_in_state(state, connector, new_con_state, i) { 7696 7697 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 7698 continue; 7699 7700 aconnector = to_amdgpu_dm_connector(connector); 7701 7702 if (!aconnector->mst_output_port) 7703 continue; 7704 7705 if (!new_con_state || !new_con_state->crtc) 7706 continue; 7707 7708 dm_conn_state = to_dm_connector_state(new_con_state); 7709 7710 for (j = 0; j < dc_state->stream_count; j++) { 7711 stream = dc_state->streams[j]; 7712 if (!stream) 7713 continue; 7714 7715 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector) 7716 break; 7717 7718 stream = NULL; 7719 } 7720 7721 if (!stream) 7722 continue; 7723 7724 pbn_div = dm_mst_get_pbn_divider(stream->link); 7725 /* pbn is calculated by compute_mst_dsc_configs_for_state*/ 7726 for (j = 0; j < dc_state->stream_count; j++) { 7727 if (vars[j].aconnector == aconnector) { 7728 pbn = vars[j].pbn; 7729 break; 7730 } 7731 } 7732 7733 if (j == dc_state->stream_count || pbn_div == 0) 7734 continue; 7735 7736 slot_num = DIV_ROUND_UP(pbn, pbn_div); 7737 7738 if (stream->timing.flags.DSC != 1) { 7739 dm_conn_state->pbn = pbn; 7740 dm_conn_state->vcpi_slots = slot_num; 7741 7742 ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, 7743 dm_conn_state->pbn, false); 7744 if (ret < 0) 7745 return ret; 7746 7747 continue; 7748 } 7749 7750 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true); 7751 if (vcpi < 0) 7752 return vcpi; 7753 7754 dm_conn_state->pbn = pbn; 7755 dm_conn_state->vcpi_slots = vcpi; 7756 } 7757 return 0; 7758 } 7759 7760 static int to_drm_connector_type(enum signal_type st) 7761 { 7762 switch (st) { 7763 case SIGNAL_TYPE_HDMI_TYPE_A: 7764 return DRM_MODE_CONNECTOR_HDMIA; 7765 case SIGNAL_TYPE_EDP: 7766 return DRM_MODE_CONNECTOR_eDP; 7767 case SIGNAL_TYPE_LVDS: 7768 return DRM_MODE_CONNECTOR_LVDS; 7769 case SIGNAL_TYPE_RGB: 7770 return DRM_MODE_CONNECTOR_VGA; 7771 case SIGNAL_TYPE_DISPLAY_PORT: 7772 case SIGNAL_TYPE_DISPLAY_PORT_MST: 7773 return DRM_MODE_CONNECTOR_DisplayPort; 7774 case SIGNAL_TYPE_DVI_DUAL_LINK: 7775 case SIGNAL_TYPE_DVI_SINGLE_LINK: 7776 return DRM_MODE_CONNECTOR_DVID; 7777 case SIGNAL_TYPE_VIRTUAL: 7778 return DRM_MODE_CONNECTOR_VIRTUAL; 7779 7780 default: 7781 return DRM_MODE_CONNECTOR_Unknown; 7782 } 7783 } 7784 7785 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) 7786 { 7787 struct drm_encoder *encoder; 7788 7789 /* There is only one encoder per connector */ 7790 drm_connector_for_each_possible_encoder(connector, encoder) 7791 return encoder; 7792 7793 return NULL; 7794 } 7795 7796 static void amdgpu_dm_get_native_mode(struct drm_connector *connector) 7797 { 7798 struct drm_encoder *encoder; 7799 struct amdgpu_encoder *amdgpu_encoder; 7800 7801 encoder = amdgpu_dm_connector_to_encoder(connector); 7802 7803 if (encoder == NULL) 7804 return; 7805 7806 amdgpu_encoder = to_amdgpu_encoder(encoder); 7807 7808 amdgpu_encoder->native_mode.clock = 0; 
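
	/*
	 * Reset the cached native mode before probing; the loop below adopts
	 * the first preferred mode found in the probed list, and
	 * amdgpu_dm_create_common_mode() later duplicates this native mode as
	 * the template for the common modes it synthesizes.
	 */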
7809 7810 if (!list_empty(&connector->probed_modes)) { 7811 struct drm_display_mode *preferred_mode = NULL; 7812 7813 list_for_each_entry(preferred_mode, 7814 &connector->probed_modes, 7815 head) { 7816 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) 7817 amdgpu_encoder->native_mode = *preferred_mode; 7818 7819 break; 7820 } 7821 7822 } 7823 } 7824 7825 static struct drm_display_mode * 7826 amdgpu_dm_create_common_mode(struct drm_encoder *encoder, 7827 char *name, 7828 int hdisplay, int vdisplay) 7829 { 7830 struct drm_device *dev = encoder->dev; 7831 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7832 struct drm_display_mode *mode = NULL; 7833 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7834 7835 mode = drm_mode_duplicate(dev, native_mode); 7836 7837 if (mode == NULL) 7838 return NULL; 7839 7840 mode->hdisplay = hdisplay; 7841 mode->vdisplay = vdisplay; 7842 mode->type &= ~DRM_MODE_TYPE_PREFERRED; 7843 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); 7844 7845 return mode; 7846 7847 } 7848 7849 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, 7850 struct drm_connector *connector) 7851 { 7852 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7853 struct drm_display_mode *mode = NULL; 7854 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7855 struct amdgpu_dm_connector *amdgpu_dm_connector = 7856 to_amdgpu_dm_connector(connector); 7857 int i; 7858 int n; 7859 struct mode_size { 7860 char name[DRM_DISPLAY_MODE_LEN]; 7861 int w; 7862 int h; 7863 } common_modes[] = { 7864 { "640x480", 640, 480}, 7865 { "800x600", 800, 600}, 7866 { "1024x768", 1024, 768}, 7867 { "1280x720", 1280, 720}, 7868 { "1280x800", 1280, 800}, 7869 {"1280x1024", 1280, 1024}, 7870 { "1440x900", 1440, 900}, 7871 {"1680x1050", 1680, 1050}, 7872 {"1600x1200", 1600, 1200}, 7873 {"1920x1080", 1920, 1080}, 7874 {"1920x1200", 1920, 1200} 7875 }; 7876 7877 n = ARRAY_SIZE(common_modes); 7878 7879 for (i = 0; i < n; i++) { 7880 struct drm_display_mode *curmode = NULL; 7881 bool mode_existed = false; 7882 7883 if (common_modes[i].w > native_mode->hdisplay || 7884 common_modes[i].h > native_mode->vdisplay || 7885 (common_modes[i].w == native_mode->hdisplay && 7886 common_modes[i].h == native_mode->vdisplay)) 7887 continue; 7888 7889 list_for_each_entry(curmode, &connector->probed_modes, head) { 7890 if (common_modes[i].w == curmode->hdisplay && 7891 common_modes[i].h == curmode->vdisplay) { 7892 mode_existed = true; 7893 break; 7894 } 7895 } 7896 7897 if (mode_existed) 7898 continue; 7899 7900 mode = amdgpu_dm_create_common_mode(encoder, 7901 common_modes[i].name, common_modes[i].w, 7902 common_modes[i].h); 7903 if (!mode) 7904 continue; 7905 7906 drm_mode_probed_add(connector, mode); 7907 amdgpu_dm_connector->num_modes++; 7908 } 7909 } 7910 7911 static void amdgpu_set_panel_orientation(struct drm_connector *connector) 7912 { 7913 struct drm_encoder *encoder; 7914 struct amdgpu_encoder *amdgpu_encoder; 7915 const struct drm_display_mode *native_mode; 7916 7917 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && 7918 connector->connector_type != DRM_MODE_CONNECTOR_LVDS) 7919 return; 7920 7921 mutex_lock(&connector->dev->mode_config.mutex); 7922 amdgpu_dm_connector_get_modes(connector); 7923 mutex_unlock(&connector->dev->mode_config.mutex); 7924 7925 encoder = amdgpu_dm_connector_to_encoder(connector); 7926 if (!encoder) 7927 return; 7928 7929 amdgpu_encoder = to_amdgpu_encoder(encoder); 7930 7931 native_mode = 
&amdgpu_encoder->native_mode; 7932 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) 7933 return; 7934 7935 drm_connector_set_panel_orientation_with_quirk(connector, 7936 DRM_MODE_PANEL_ORIENTATION_UNKNOWN, 7937 native_mode->hdisplay, 7938 native_mode->vdisplay); 7939 } 7940 7941 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, 7942 const struct drm_edid *drm_edid) 7943 { 7944 struct amdgpu_dm_connector *amdgpu_dm_connector = 7945 to_amdgpu_dm_connector(connector); 7946 7947 if (drm_edid) { 7948 /* empty probed_modes */ 7949 INIT_LIST_HEAD(&connector->probed_modes); 7950 amdgpu_dm_connector->num_modes = 7951 drm_edid_connector_add_modes(connector); 7952 7953 /* sorting the probed modes before calling function 7954 * amdgpu_dm_get_native_mode() since EDID can have 7955 * more than one preferred mode. The modes that are 7956 * later in the probed mode list could be of higher 7957 * and preferred resolution. For example, 3840x2160 7958 * resolution in base EDID preferred timing and 4096x2160 7959 * preferred resolution in DID extension block later. 7960 */ 7961 drm_mode_sort(&connector->probed_modes); 7962 amdgpu_dm_get_native_mode(connector); 7963 7964 /* Freesync capabilities are reset by calling 7965 * drm_edid_connector_add_modes() and need to be 7966 * restored here. 7967 */ 7968 amdgpu_dm_update_freesync_caps(connector, drm_edid); 7969 } else { 7970 amdgpu_dm_connector->num_modes = 0; 7971 } 7972 } 7973 7974 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, 7975 struct drm_display_mode *mode) 7976 { 7977 struct drm_display_mode *m; 7978 7979 list_for_each_entry(m, &aconnector->base.probed_modes, head) { 7980 if (drm_mode_equal(m, mode)) 7981 return true; 7982 } 7983 7984 return false; 7985 } 7986 7987 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) 7988 { 7989 const struct drm_display_mode *m; 7990 struct drm_display_mode *new_mode; 7991 uint i; 7992 u32 new_modes_count = 0; 7993 7994 /* Standard FPS values 7995 * 7996 * 23.976 - TV/NTSC 7997 * 24 - Cinema 7998 * 25 - TV/PAL 7999 * 29.97 - TV/NTSC 8000 * 30 - TV/NTSC 8001 * 48 - Cinema HFR 8002 * 50 - TV/PAL 8003 * 60 - Commonly used 8004 * 48,72,96,120 - Multiples of 24 8005 */ 8006 static const u32 common_rates[] = { 8007 23976, 24000, 25000, 29970, 30000, 8008 48000, 50000, 60000, 72000, 96000, 120000 8009 }; 8010 8011 /* 8012 * Find mode with highest refresh rate with the same resolution 8013 * as the preferred mode. Some monitors report a preferred mode 8014 * with lower resolution than the highest refresh rate supported. 
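	 *
	 * The synthesized modes keep the base mode's pixel clock and htotal
	 * and stretch vtotal to hit each target rate (rates are in mHz):
	 *   target_vtotal = clock * 1000 * 1000 / (rate * htotal)
	 * Illustrative example: a 1920x1080@60 base mode with clock 148500,
	 * htotal 2200 and vtotal 1125 gives, for the 48000 (48 Hz) entry,
	 * 148500000000 / (48000 * 2200) = 1406, so vtotal grows by 281 lines
	 * and vsync start/end shift down by the same amount.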
8015 */ 8016 8017 m = get_highest_refresh_rate_mode(aconnector, true); 8018 if (!m) 8019 return 0; 8020 8021 for (i = 0; i < ARRAY_SIZE(common_rates); i++) { 8022 u64 target_vtotal, target_vtotal_diff; 8023 u64 num, den; 8024 8025 if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) 8026 continue; 8027 8028 if (common_rates[i] < aconnector->min_vfreq * 1000 || 8029 common_rates[i] > aconnector->max_vfreq * 1000) 8030 continue; 8031 8032 num = (unsigned long long)m->clock * 1000 * 1000; 8033 den = common_rates[i] * (unsigned long long)m->htotal; 8034 target_vtotal = div_u64(num, den); 8035 target_vtotal_diff = target_vtotal - m->vtotal; 8036 8037 /* Check for illegal modes */ 8038 if (m->vsync_start + target_vtotal_diff < m->vdisplay || 8039 m->vsync_end + target_vtotal_diff < m->vsync_start || 8040 m->vtotal + target_vtotal_diff < m->vsync_end) 8041 continue; 8042 8043 new_mode = drm_mode_duplicate(aconnector->base.dev, m); 8044 if (!new_mode) 8045 goto out; 8046 8047 new_mode->vtotal += (u16)target_vtotal_diff; 8048 new_mode->vsync_start += (u16)target_vtotal_diff; 8049 new_mode->vsync_end += (u16)target_vtotal_diff; 8050 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; 8051 new_mode->type |= DRM_MODE_TYPE_DRIVER; 8052 8053 if (!is_duplicate_mode(aconnector, new_mode)) { 8054 drm_mode_probed_add(&aconnector->base, new_mode); 8055 new_modes_count += 1; 8056 } else 8057 drm_mode_destroy(aconnector->base.dev, new_mode); 8058 } 8059 out: 8060 return new_modes_count; 8061 } 8062 8063 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, 8064 const struct drm_edid *drm_edid) 8065 { 8066 struct amdgpu_dm_connector *amdgpu_dm_connector = 8067 to_amdgpu_dm_connector(connector); 8068 8069 if (!(amdgpu_freesync_vid_mode && drm_edid)) 8070 return; 8071 8072 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 8073 amdgpu_dm_connector->num_modes += 8074 add_fs_modes(amdgpu_dm_connector); 8075 } 8076 8077 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) 8078 { 8079 struct amdgpu_dm_connector *amdgpu_dm_connector = 8080 to_amdgpu_dm_connector(connector); 8081 struct drm_encoder *encoder; 8082 const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid; 8083 struct dc_link_settings *verified_link_cap = 8084 &amdgpu_dm_connector->dc_link->verified_link_cap; 8085 const struct dc *dc = amdgpu_dm_connector->dc_link->dc; 8086 8087 encoder = amdgpu_dm_connector_to_encoder(connector); 8088 8089 if (!drm_edid) { 8090 amdgpu_dm_connector->num_modes = 8091 drm_add_modes_noedid(connector, 640, 480); 8092 if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) 8093 amdgpu_dm_connector->num_modes += 8094 drm_add_modes_noedid(connector, 1920, 1080); 8095 } else { 8096 amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); 8097 if (encoder) 8098 amdgpu_dm_connector_add_common_modes(encoder, connector); 8099 amdgpu_dm_connector_add_freesync_modes(connector, drm_edid); 8100 } 8101 amdgpu_dm_fbc_init(connector); 8102 8103 return amdgpu_dm_connector->num_modes; 8104 } 8105 8106 static const u32 supported_colorspaces = 8107 BIT(DRM_MODE_COLORIMETRY_BT709_YCC) | 8108 BIT(DRM_MODE_COLORIMETRY_OPRGB) | 8109 BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) | 8110 BIT(DRM_MODE_COLORIMETRY_BT2020_YCC); 8111 8112 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, 8113 struct amdgpu_dm_connector *aconnector, 8114 int connector_type, 8115 struct dc_link *link, 8116 int link_index) 8117 { 8118 struct amdgpu_device *adev = 
drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->bl_idx = -1;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	aconnector->pack_sdp_v1_3 = false;
	aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
	memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
	mutex_init(&aconnector->hpd_lock);
	mutex_init(&aconnector->handle_mst_msg_ready);

	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		link->link_enc = link_enc_cfg_get_link_enc(link);
		ASSERT(link->link_enc);
		if (link->link_enc)
			aconnector->base.ycbcr_420_allowed =
				link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				   dm->ddev->mode_config.scaling_mode_property,
				   DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_property,
				   UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_hborder_property,
				   0);
	drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_vborder_property,
				   0);

	if (!aconnector->mst_root)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	aconnector->base.state->max_bpc = 16;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
		/* Content Type is currently only implemented for HDMI.
*/ 8189 drm_connector_attach_content_type_property(&aconnector->base); 8190 } 8191 8192 if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { 8193 if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces)) 8194 drm_connector_attach_colorspace_property(&aconnector->base); 8195 } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) || 8196 connector_type == DRM_MODE_CONNECTOR_eDP) { 8197 if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces)) 8198 drm_connector_attach_colorspace_property(&aconnector->base); 8199 } 8200 8201 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 8202 connector_type == DRM_MODE_CONNECTOR_DisplayPort || 8203 connector_type == DRM_MODE_CONNECTOR_eDP) { 8204 drm_connector_attach_hdr_output_metadata_property(&aconnector->base); 8205 8206 if (!aconnector->mst_root) 8207 drm_connector_attach_vrr_capable_property(&aconnector->base); 8208 8209 if (adev->dm.hdcp_workqueue) 8210 drm_connector_attach_content_protection_property(&aconnector->base, true); 8211 } 8212 } 8213 8214 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 8215 struct i2c_msg *msgs, int num) 8216 { 8217 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 8218 struct ddc_service *ddc_service = i2c->ddc_service; 8219 struct i2c_command cmd; 8220 int i; 8221 int result = -EIO; 8222 8223 if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) 8224 return result; 8225 8226 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); 8227 8228 if (!cmd.payloads) 8229 return result; 8230 8231 cmd.number_of_payloads = num; 8232 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 8233 cmd.speed = 100; 8234 8235 for (i = 0; i < num; i++) { 8236 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 8237 cmd.payloads[i].address = msgs[i].addr; 8238 cmd.payloads[i].length = msgs[i].len; 8239 cmd.payloads[i].data = msgs[i].buf; 8240 } 8241 8242 if (dc_submit_i2c( 8243 ddc_service->ctx->dc, 8244 ddc_service->link->link_index, 8245 &cmd)) 8246 result = num; 8247 8248 kfree(cmd.payloads); 8249 return result; 8250 } 8251 8252 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 8253 { 8254 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 8255 } 8256 8257 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 8258 .master_xfer = amdgpu_dm_i2c_xfer, 8259 .functionality = amdgpu_dm_i2c_func, 8260 }; 8261 8262 static struct amdgpu_i2c_adapter * 8263 create_i2c(struct ddc_service *ddc_service, 8264 int link_index, 8265 int *res) 8266 { 8267 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 8268 struct amdgpu_i2c_adapter *i2c; 8269 8270 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); 8271 if (!i2c) 8272 return NULL; 8273 i2c->base.owner = THIS_MODULE; 8274 i2c->base.dev.parent = &adev->pdev->dev; 8275 i2c->base.algo = &amdgpu_dm_i2c_algo; 8276 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); 8277 i2c_set_adapdata(&i2c->base, i2c); 8278 i2c->ddc_service = ddc_service; 8279 8280 return i2c; 8281 } 8282 8283 8284 /* 8285 * Note: this function assumes that dc_link_detect() was called for the 8286 * dc_link which will be represented by this aconnector. 
8287 */ 8288 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 8289 struct amdgpu_dm_connector *aconnector, 8290 u32 link_index, 8291 struct amdgpu_encoder *aencoder) 8292 { 8293 int res = 0; 8294 int connector_type; 8295 struct dc *dc = dm->dc; 8296 struct dc_link *link = dc_get_link_at_index(dc, link_index); 8297 struct amdgpu_i2c_adapter *i2c; 8298 8299 /* Not needed for writeback connector */ 8300 link->priv = aconnector; 8301 8302 8303 i2c = create_i2c(link->ddc, link->link_index, &res); 8304 if (!i2c) { 8305 DRM_ERROR("Failed to create i2c adapter data\n"); 8306 return -ENOMEM; 8307 } 8308 8309 aconnector->i2c = i2c; 8310 res = i2c_add_adapter(&i2c->base); 8311 8312 if (res) { 8313 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); 8314 goto out_free; 8315 } 8316 8317 connector_type = to_drm_connector_type(link->connector_signal); 8318 8319 res = drm_connector_init_with_ddc( 8320 dm->ddev, 8321 &aconnector->base, 8322 &amdgpu_dm_connector_funcs, 8323 connector_type, 8324 &i2c->base); 8325 8326 if (res) { 8327 DRM_ERROR("connector_init failed\n"); 8328 aconnector->connector_id = -1; 8329 goto out_free; 8330 } 8331 8332 drm_connector_helper_add( 8333 &aconnector->base, 8334 &amdgpu_dm_connector_helper_funcs); 8335 8336 amdgpu_dm_connector_init_helper( 8337 dm, 8338 aconnector, 8339 connector_type, 8340 link, 8341 link_index); 8342 8343 drm_connector_attach_encoder( 8344 &aconnector->base, &aencoder->base); 8345 8346 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort 8347 || connector_type == DRM_MODE_CONNECTOR_eDP) 8348 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); 8349 8350 out_free: 8351 if (res) { 8352 kfree(i2c); 8353 aconnector->i2c = NULL; 8354 } 8355 return res; 8356 } 8357 8358 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 8359 { 8360 switch (adev->mode_info.num_crtc) { 8361 case 1: 8362 return 0x1; 8363 case 2: 8364 return 0x3; 8365 case 3: 8366 return 0x7; 8367 case 4: 8368 return 0xf; 8369 case 5: 8370 return 0x1f; 8371 case 6: 8372 default: 8373 return 0x3f; 8374 } 8375 } 8376 8377 static int amdgpu_dm_encoder_init(struct drm_device *dev, 8378 struct amdgpu_encoder *aencoder, 8379 uint32_t link_index) 8380 { 8381 struct amdgpu_device *adev = drm_to_adev(dev); 8382 8383 int res = drm_encoder_init(dev, 8384 &aencoder->base, 8385 &amdgpu_dm_encoder_funcs, 8386 DRM_MODE_ENCODER_TMDS, 8387 NULL); 8388 8389 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 8390 8391 if (!res) 8392 aencoder->encoder_id = link_index; 8393 else 8394 aencoder->encoder_id = -1; 8395 8396 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 8397 8398 return res; 8399 } 8400 8401 static void manage_dm_interrupts(struct amdgpu_device *adev, 8402 struct amdgpu_crtc *acrtc, 8403 struct dm_crtc_state *acrtc_state) 8404 { 8405 struct drm_vblank_crtc_config config = {0}; 8406 struct dc_crtc_timing *timing; 8407 int offdelay; 8408 8409 if (acrtc_state) { 8410 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < 8411 IP_VERSION(3, 5, 0) || 8412 acrtc_state->stream->link->psr_settings.psr_version < 8413 DC_PSR_VERSION_UNSUPPORTED || 8414 !(adev->flags & AMD_IS_APU)) { 8415 timing = &acrtc_state->stream->timing; 8416 8417 /* at least 2 frames */ 8418 offdelay = DIV64_U64_ROUND_UP((u64)20 * 8419 timing->v_total * 8420 timing->h_total, 8421 timing->pix_clk_100hz); 8422 8423 config.offdelay_ms = offdelay ?: 30; 8424 } else { 8425 config.disable_immediate = true; 8426 } 8427 8428 
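
		/*
		 * Worked example with illustrative numbers: a 1920x1080@60
		 * timing with h_total 2200, v_total 1125 and pix_clk_100hz
		 * 1485000 gives offdelay = roundup(20 * 1125 * 2200 / 1485000)
		 * = 34, so the vblank irq stays enabled for about two frame
		 * times (~33 ms) after its last user.
		 */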
		drm_crtc_vblank_on_config(&acrtc->base, &config);
	} else {
		drm_crtc_vblank_off(&acrtc->base);
	}
}

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * This reads the current state for the IRQ and force-reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
					    struct drm_crtc_state *old_crtc_state,
					    struct drm_connector_state *new_conn_state,
					    struct drm_connector_state *old_conn_state,
					    const struct drm_connector *connector,
					    struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
		 connector->index, connector->status, connector->dpms);
	pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
		 old_conn_state->content_protection, new_conn_state->content_protection);

	if (old_crtc_state)
		pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
			 old_crtc_state->enable,
			 old_crtc_state->active,
			 old_crtc_state->mode_changed,
			 old_crtc_state->active_changed,
			 old_crtc_state->connectors_changed);

	if (new_crtc_state)
		pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
			 new_crtc_state->enable,
			 new_crtc_state->active,
			 new_crtc_state->mode_changed,
			 new_crtc_state->active_changed,
			 new_crtc_state->connectors_changed);

	/* HDCP content type change */
	if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
	    new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
		return true;
	}

	/* CP is being re-enabled, ignore this */
	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		if (new_crtc_state && new_crtc_state->mode_changed) {
			new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
			return true;
		}
		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
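		/*
		 * Hardware still has HDCP enabled, so restore the ENABLED
		 * state and report that nothing changed.
		 */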
pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); 8515 return false; 8516 } 8517 8518 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED 8519 * 8520 * Handles: UNDESIRED -> ENABLED 8521 */ 8522 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 8523 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 8524 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8525 8526 /* Stream removed and re-enabled 8527 * 8528 * Can sometimes overlap with the HPD case, 8529 * thus set update_hdcp to false to avoid 8530 * setting HDCP multiple times. 8531 * 8532 * Handles: DESIRED -> DESIRED (Special case) 8533 */ 8534 if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && 8535 new_conn_state->crtc && new_conn_state->crtc->enabled && 8536 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8537 dm_con_state->update_hdcp = false; 8538 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", 8539 __func__); 8540 return true; 8541 } 8542 8543 /* Hot-plug, headless s3, dpms 8544 * 8545 * Only start HDCP if the display is connected/enabled. 8546 * update_hdcp flag will be set to false until the next 8547 * HPD comes in. 8548 * 8549 * Handles: DESIRED -> DESIRED (Special case) 8550 */ 8551 if (dm_con_state->update_hdcp && 8552 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 8553 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 8554 dm_con_state->update_hdcp = false; 8555 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", 8556 __func__); 8557 return true; 8558 } 8559 8560 if (old_conn_state->content_protection == new_conn_state->content_protection) { 8561 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8562 if (new_crtc_state && new_crtc_state->mode_changed) { 8563 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", 8564 __func__); 8565 return true; 8566 } 8567 pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", 8568 __func__); 8569 return false; 8570 } 8571 8572 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); 8573 return false; 8574 } 8575 8576 if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { 8577 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", 8578 __func__); 8579 return true; 8580 } 8581 8582 pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); 8583 return false; 8584 } 8585 8586 static void remove_stream(struct amdgpu_device *adev, 8587 struct amdgpu_crtc *acrtc, 8588 struct dc_stream_state *stream) 8589 { 8590 /* this is the update mode case */ 8591 8592 acrtc->otg_inst = -1; 8593 acrtc->enabled = false; 8594 } 8595 8596 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 8597 { 8598 8599 assert_spin_locked(&acrtc->base.dev->event_lock); 8600 WARN_ON(acrtc->event); 8601 8602 acrtc->event = acrtc->base.state->event; 8603 8604 /* Set the flip status */ 8605 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 8606 8607 /* Mark this event as consumed */ 8608 acrtc->base.state->event = NULL; 8609 8610 drm_dbg_state(acrtc->base.dev, 8611 "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 8612 acrtc->crtc_id); 8613 } 8614 8615 static void update_freesync_state_on_stream( 8616 struct amdgpu_display_manager *dm, 8617 struct dm_crtc_state *new_crtc_state, 8618 struct 
dc_stream_state *new_stream, 8619 struct dc_plane_state *surface, 8620 u32 flip_timestamp_in_us) 8621 { 8622 struct mod_vrr_params vrr_params; 8623 struct dc_info_packet vrr_infopacket = {0}; 8624 struct amdgpu_device *adev = dm->adev; 8625 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 8626 unsigned long flags; 8627 bool pack_sdp_v1_3 = false; 8628 struct amdgpu_dm_connector *aconn; 8629 enum vrr_packet_type packet_type = PACKET_TYPE_VRR; 8630 8631 if (!new_stream) 8632 return; 8633 8634 /* 8635 * TODO: Determine why min/max totals and vrefresh can be 0 here. 8636 * For now it's sufficient to just guard against these conditions. 8637 */ 8638 8639 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 8640 return; 8641 8642 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8643 vrr_params = acrtc->dm_irq_params.vrr_params; 8644 8645 if (surface) { 8646 mod_freesync_handle_preflip( 8647 dm->freesync_module, 8648 surface, 8649 new_stream, 8650 flip_timestamp_in_us, 8651 &vrr_params); 8652 8653 if (adev->family < AMDGPU_FAMILY_AI && 8654 amdgpu_dm_crtc_vrr_active(new_crtc_state)) { 8655 mod_freesync_handle_v_update(dm->freesync_module, 8656 new_stream, &vrr_params); 8657 8658 /* Need to call this before the frame ends. */ 8659 dc_stream_adjust_vmin_vmax(dm->dc, 8660 new_crtc_state->stream, 8661 &vrr_params.adjust); 8662 } 8663 } 8664 8665 aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; 8666 8667 if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) { 8668 pack_sdp_v1_3 = aconn->pack_sdp_v1_3; 8669 8670 if (aconn->vsdb_info.amd_vsdb_version == 1) 8671 packet_type = PACKET_TYPE_FS_V1; 8672 else if (aconn->vsdb_info.amd_vsdb_version == 2) 8673 packet_type = PACKET_TYPE_FS_V2; 8674 else if (aconn->vsdb_info.amd_vsdb_version == 3) 8675 packet_type = PACKET_TYPE_FS_V3; 8676 8677 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, 8678 &new_stream->adaptive_sync_infopacket); 8679 } 8680 8681 mod_freesync_build_vrr_infopacket( 8682 dm->freesync_module, 8683 new_stream, 8684 &vrr_params, 8685 packet_type, 8686 TRANSFER_FUNC_UNKNOWN, 8687 &vrr_infopacket, 8688 pack_sdp_v1_3); 8689 8690 new_crtc_state->freesync_vrr_info_changed |= 8691 (memcmp(&new_crtc_state->vrr_infopacket, 8692 &vrr_infopacket, 8693 sizeof(vrr_infopacket)) != 0); 8694 8695 acrtc->dm_irq_params.vrr_params = vrr_params; 8696 new_crtc_state->vrr_infopacket = vrr_infopacket; 8697 8698 new_stream->vrr_infopacket = vrr_infopacket; 8699 new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params); 8700 8701 if (new_crtc_state->freesync_vrr_info_changed) 8702 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", 8703 new_crtc_state->base.crtc->base.id, 8704 (int)new_crtc_state->base.vrr_enabled, 8705 (int)vrr_params.state); 8706 8707 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8708 } 8709 8710 static void update_stream_irq_parameters( 8711 struct amdgpu_display_manager *dm, 8712 struct dm_crtc_state *new_crtc_state) 8713 { 8714 struct dc_stream_state *new_stream = new_crtc_state->stream; 8715 struct mod_vrr_params vrr_params; 8716 struct mod_freesync_config config = new_crtc_state->freesync_config; 8717 struct amdgpu_device *adev = dm->adev; 8718 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 8719 unsigned long flags; 8720 8721 if (!new_stream) 8722 return; 8723 8724 /* 8725 * TODO: Determine why min/max totals and vrefresh can be 0 here. 
8726 * For now it's sufficient to just guard against these conditions. 8727 */ 8728 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 8729 return; 8730 8731 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8732 vrr_params = acrtc->dm_irq_params.vrr_params; 8733 8734 if (new_crtc_state->vrr_supported && 8735 config.min_refresh_in_uhz && 8736 config.max_refresh_in_uhz) { 8737 /* 8738 * if freesync compatible mode was set, config.state will be set 8739 * in atomic check 8740 */ 8741 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && 8742 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || 8743 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { 8744 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; 8745 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; 8746 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; 8747 vrr_params.state = VRR_STATE_ACTIVE_FIXED; 8748 } else { 8749 config.state = new_crtc_state->base.vrr_enabled ? 8750 VRR_STATE_ACTIVE_VARIABLE : 8751 VRR_STATE_INACTIVE; 8752 } 8753 } else { 8754 config.state = VRR_STATE_UNSUPPORTED; 8755 } 8756 8757 mod_freesync_build_vrr_params(dm->freesync_module, 8758 new_stream, 8759 &config, &vrr_params); 8760 8761 new_crtc_state->freesync_config = config; 8762 /* Copy state for access from DM IRQ handler */ 8763 acrtc->dm_irq_params.freesync_config = config; 8764 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes; 8765 acrtc->dm_irq_params.vrr_params = vrr_params; 8766 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8767 } 8768 8769 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, 8770 struct dm_crtc_state *new_state) 8771 { 8772 bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state); 8773 bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state); 8774 8775 if (!old_vrr_active && new_vrr_active) { 8776 /* Transition VRR inactive -> active: 8777 * While VRR is active, we must not disable vblank irq, as a 8778 * reenable after disable would compute bogus vblank/pflip 8779 * timestamps if it likely happened inside display front-porch. 8780 * 8781 * We also need vupdate irq for the actual core vblank handling 8782 * at end of vblank. 8783 */ 8784 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0); 8785 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); 8786 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", 8787 __func__, new_state->base.crtc->base.id); 8788 } else if (old_vrr_active && !new_vrr_active) { 8789 /* Transition VRR active -> inactive: 8790 * Allow vblank irq disable again for fixed refresh rate. 8791 */ 8792 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0); 8793 drm_crtc_vblank_put(new_state->base.crtc); 8794 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", 8795 __func__, new_state->base.crtc->base.id); 8796 } 8797 } 8798 8799 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) 8800 { 8801 struct drm_plane *plane; 8802 struct drm_plane_state *old_plane_state; 8803 int i; 8804 8805 /* 8806 * TODO: Make this per-stream so we don't issue redundant updates for 8807 * commits with multiple streams. 
8808 */ 8809 for_each_old_plane_in_state(state, plane, old_plane_state, i) 8810 if (plane->type == DRM_PLANE_TYPE_CURSOR) 8811 amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state); 8812 } 8813 8814 static inline uint32_t get_mem_type(struct drm_framebuffer *fb) 8815 { 8816 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); 8817 8818 return abo->tbo.resource ? abo->tbo.resource->mem_type : 0; 8819 } 8820 8821 static void amdgpu_dm_update_cursor(struct drm_plane *plane, 8822 struct drm_plane_state *old_plane_state, 8823 struct dc_stream_update *update) 8824 { 8825 struct amdgpu_device *adev = drm_to_adev(plane->dev); 8826 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 8827 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 8828 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 8829 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 8830 uint64_t address = afb ? afb->address : 0; 8831 struct dc_cursor_position position = {0}; 8832 struct dc_cursor_attributes attributes; 8833 int ret; 8834 8835 if (!plane->state->fb && !old_plane_state->fb) 8836 return; 8837 8838 drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n", 8839 amdgpu_crtc->crtc_id, plane->state->crtc_w, 8840 plane->state->crtc_h); 8841 8842 ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); 8843 if (ret) 8844 return; 8845 8846 if (!position.enable) { 8847 /* turn off cursor */ 8848 if (crtc_state && crtc_state->stream) { 8849 dc_stream_set_cursor_position(crtc_state->stream, 8850 &position); 8851 update->cursor_position = &crtc_state->stream->cursor_position; 8852 } 8853 return; 8854 } 8855 8856 amdgpu_crtc->cursor_width = plane->state->crtc_w; 8857 amdgpu_crtc->cursor_height = plane->state->crtc_h; 8858 8859 memset(&attributes, 0, sizeof(attributes)); 8860 attributes.address.high_part = upper_32_bits(address); 8861 attributes.address.low_part = lower_32_bits(address); 8862 attributes.width = plane->state->crtc_w; 8863 attributes.height = plane->state->crtc_h; 8864 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 8865 attributes.rotation_angle = 0; 8866 attributes.attribute_flags.value = 0; 8867 8868 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM 8869 * legacy gamma setup. 
8870 */ 8871 if (crtc_state->cm_is_degamma_srgb && 8872 adev->dm.dc->caps.color.dpp.gamma_corr) 8873 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; 8874 8875 if (afb) 8876 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; 8877 8878 if (crtc_state->stream) { 8879 if (!dc_stream_set_cursor_attributes(crtc_state->stream, 8880 &attributes)) 8881 DRM_ERROR("DC failed to set cursor attributes\n"); 8882 8883 update->cursor_attributes = &crtc_state->stream->cursor_attributes; 8884 8885 if (!dc_stream_set_cursor_position(crtc_state->stream, 8886 &position)) 8887 DRM_ERROR("DC failed to set cursor position\n"); 8888 8889 update->cursor_position = &crtc_state->stream->cursor_position; 8890 } 8891 } 8892 8893 static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach, 8894 const struct dm_crtc_state *acrtc_state, 8895 const u64 current_ts) 8896 { 8897 struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; 8898 struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; 8899 struct amdgpu_dm_connector *aconn = 8900 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; 8901 8902 if (acrtc_state->update_type > UPDATE_TYPE_FAST) { 8903 if (pr->config.replay_supported && !pr->replay_feature_enabled) 8904 amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn); 8905 else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED && 8906 !psr->psr_feature_enabled) 8907 if (!aconn->disallow_edp_enter_psr) 8908 amdgpu_dm_link_setup_psr(acrtc_state->stream); 8909 } 8910 8911 /* Decrement skip count when SR is enabled and we're doing fast updates. */ 8912 if (acrtc_state->update_type == UPDATE_TYPE_FAST && 8913 (psr->psr_feature_enabled || pr->config.replay_supported)) { 8914 if (aconn->sr_skip_count > 0) 8915 aconn->sr_skip_count--; 8916 8917 /* Allow SR when skip count is 0. */ 8918 acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count; 8919 8920 /* 8921 * If sink supports PSR SU/Panel Replay, there is no need to rely on 8922 * a vblank event disable request to enable PSR/RP. PSR SU/RP 8923 * can be enabled immediately once OS demonstrates an 8924 * adequate number of fast atomic commits to notify KMD 8925 * of update events. See `vblank_control_worker()`. 
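	 *
	 * Entry is additionally gated below: the most recent dirty-rect
	 * change must be at least 500 ms old (500000000 ns) before PSR SU
	 * or Panel Replay is actually activated.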
8926 */ 8927 if (acrtc_attach->dm_irq_params.allow_sr_entry && 8928 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 8929 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && 8930 #endif 8931 (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) { 8932 if (pr->replay_feature_enabled && !pr->replay_allow_active) 8933 amdgpu_dm_replay_enable(acrtc_state->stream, true); 8934 if (psr->psr_version >= DC_PSR_VERSION_SU_1 && 8935 !psr->psr_allow_active && !aconn->disallow_edp_enter_psr) 8936 amdgpu_dm_psr_enable(acrtc_state->stream); 8937 } 8938 } else { 8939 acrtc_attach->dm_irq_params.allow_sr_entry = false; 8940 } 8941 } 8942 8943 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 8944 struct drm_device *dev, 8945 struct amdgpu_display_manager *dm, 8946 struct drm_crtc *pcrtc, 8947 bool wait_for_vblank) 8948 { 8949 u32 i; 8950 u64 timestamp_ns = ktime_get_ns(); 8951 struct drm_plane *plane; 8952 struct drm_plane_state *old_plane_state, *new_plane_state; 8953 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 8954 struct drm_crtc_state *new_pcrtc_state = 8955 drm_atomic_get_new_crtc_state(state, pcrtc); 8956 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 8957 struct dm_crtc_state *dm_old_crtc_state = 8958 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 8959 int planes_count = 0, vpos, hpos; 8960 unsigned long flags; 8961 u32 target_vblank, last_flip_vblank; 8962 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); 8963 bool cursor_update = false; 8964 bool pflip_present = false; 8965 bool dirty_rects_changed = false; 8966 bool updated_planes_and_streams = false; 8967 struct { 8968 struct dc_surface_update surface_updates[MAX_SURFACES]; 8969 struct dc_plane_info plane_infos[MAX_SURFACES]; 8970 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 8971 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 8972 struct dc_stream_update stream_update; 8973 } *bundle; 8974 8975 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 8976 8977 if (!bundle) { 8978 drm_err(dev, "Failed to allocate update bundle\n"); 8979 goto cleanup; 8980 } 8981 8982 /* 8983 * Disable the cursor first if we're disabling all the planes. 8984 * It'll remain on the screen after the planes are re-enabled 8985 * if we don't. 8986 * 8987 * If the cursor is transitioning from native to overlay mode, the 8988 * native cursor needs to be disabled first. 
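* Otherwise the stale native cursor would remain on screen alongside the overlay-mode cursor.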
8989 */ 8990 if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE && 8991 dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { 8992 struct dc_cursor_position cursor_position = {0}; 8993 8994 if (!dc_stream_set_cursor_position(acrtc_state->stream, 8995 &cursor_position)) 8996 drm_err(dev, "DC failed to disable native cursor\n"); 8997 8998 bundle->stream_update.cursor_position = 8999 &acrtc_state->stream->cursor_position; 9000 } 9001 9002 if (acrtc_state->active_planes == 0 && 9003 dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) 9004 amdgpu_dm_commit_cursors(state); 9005 9006 /* update planes when needed */ 9007 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 9008 struct drm_crtc *crtc = new_plane_state->crtc; 9009 struct drm_crtc_state *new_crtc_state; 9010 struct drm_framebuffer *fb = new_plane_state->fb; 9011 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb; 9012 bool plane_needs_flip; 9013 struct dc_plane_state *dc_plane; 9014 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 9015 9016 /* Cursor plane is handled after stream updates */ 9017 if (plane->type == DRM_PLANE_TYPE_CURSOR && 9018 acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { 9019 if ((fb && crtc == pcrtc) || 9020 (old_plane_state->fb && old_plane_state->crtc == pcrtc)) { 9021 cursor_update = true; 9022 if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) != 0) 9023 amdgpu_dm_update_cursor(plane, old_plane_state, &bundle->stream_update); 9024 } 9025 9026 continue; 9027 } 9028 9029 if (!fb || !crtc || pcrtc != crtc) 9030 continue; 9031 9032 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 9033 if (!new_crtc_state->active) 9034 continue; 9035 9036 dc_plane = dm_new_plane_state->dc_state; 9037 if (!dc_plane) 9038 continue; 9039 9040 bundle->surface_updates[planes_count].surface = dc_plane; 9041 if (new_pcrtc_state->color_mgmt_changed) { 9042 bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction; 9043 bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func; 9044 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; 9045 bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult; 9046 bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func; 9047 bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func; 9048 bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf; 9049 } 9050 9051 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, 9052 &bundle->scaling_infos[planes_count]); 9053 9054 bundle->surface_updates[planes_count].scaling_info = 9055 &bundle->scaling_infos[planes_count]; 9056 9057 plane_needs_flip = old_plane_state->fb && new_plane_state->fb; 9058 9059 pflip_present = pflip_present || plane_needs_flip; 9060 9061 if (!plane_needs_flip) { 9062 planes_count += 1; 9063 continue; 9064 } 9065 9066 fill_dc_plane_info_and_addr( 9067 dm->adev, new_plane_state, 9068 afb->tiling_flags, 9069 &bundle->plane_infos[planes_count], 9070 &bundle->flip_addrs[planes_count].address, 9071 afb->tmz_surface); 9072 9073 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n", 9074 new_plane_state->plane->index, 9075 bundle->plane_infos[planes_count].dcc.enable); 9076 9077 bundle->surface_updates[planes_count].plane_info = 9078 &bundle->plane_infos[planes_count]; 9079 9080 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled || 9081 
acrtc_state->stream->link->replay_settings.replay_feature_enabled) { 9082 fill_dc_dirty_rects(plane, old_plane_state, 9083 new_plane_state, new_crtc_state, 9084 &bundle->flip_addrs[planes_count], 9085 acrtc_state->stream->link->psr_settings.psr_version == 9086 DC_PSR_VERSION_SU_1, 9087 &dirty_rects_changed); 9088 9089 /* 9090 * If the dirty regions changed, PSR-SU needs to be disabled temporarily 9091 * and then enabled again once the dirty regions are stable, to avoid a video glitch. 9092 * PSR-SU will be re-enabled in vblank_control_worker() if the user pauses the video 9093 * while PSR-SU is disabled. 9094 */ 9095 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && 9096 acrtc_attach->dm_irq_params.allow_sr_entry && 9097 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 9098 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && 9099 #endif 9100 dirty_rects_changed) { 9101 mutex_lock(&dm->dc_lock); 9102 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns = 9103 timestamp_ns; 9104 if (acrtc_state->stream->link->psr_settings.psr_allow_active) 9105 amdgpu_dm_psr_disable(acrtc_state->stream); 9106 mutex_unlock(&dm->dc_lock); 9107 } 9108 } 9109 9110 /* 9111 * Only allow immediate flips for fast updates that don't 9112 * change memory domain, FB pitch, DCC state, rotation or 9113 * mirroring. 9114 * 9115 * dm_crtc_helper_atomic_check() only accepts async flips with 9116 * fast updates. 9117 */ 9118 if (crtc->state->async_flip && 9119 (acrtc_state->update_type != UPDATE_TYPE_FAST || 9120 get_mem_type(old_plane_state->fb) != get_mem_type(fb))) 9121 drm_warn_once(state->dev, 9122 "[PLANE:%d:%s] async flip with non-fast update\n", 9123 plane->base.id, plane->name); 9124 9125 bundle->flip_addrs[planes_count].flip_immediate = 9126 crtc->state->async_flip && 9127 acrtc_state->update_type == UPDATE_TYPE_FAST && 9128 get_mem_type(old_plane_state->fb) == get_mem_type(fb); 9129 9130 timestamp_ns = ktime_get_ns(); 9131 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); 9132 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count]; 9133 bundle->surface_updates[planes_count].surface = dc_plane; 9134 9135 if (!bundle->surface_updates[planes_count].surface) { 9136 DRM_ERROR("No surface for CRTC: id=%d\n", 9137 acrtc_attach->crtc_id); 9138 continue; 9139 } 9140 9141 if (plane == pcrtc->primary) 9142 update_freesync_state_on_stream( 9143 dm, 9144 acrtc_state, 9145 acrtc_state->stream, 9146 dc_plane, 9147 bundle->flip_addrs[planes_count].flip_timestamp_in_us); 9148 9149 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n", 9150 __func__, 9151 bundle->flip_addrs[planes_count].address.grph.addr.high_part, 9152 bundle->flip_addrs[planes_count].address.grph.addr.low_part); 9153 9154 planes_count += 1; 9155 9156 } 9157 9158 if (pflip_present) { 9159 if (!vrr_active) { 9160 /* Use old throttling in non-vrr fixed refresh rate mode 9161 * to keep flip scheduling based on target vblank counts 9162 * working in a backwards compatible way, e.g., for 9163 * clients using the GLX_OML_sync_control extension or 9164 * DRI3/Present extension with defined target_msc.
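* Either way the flip is then throttled below by busy-waiting until the CRTC's vblank counter reaches target_vblank = last_flip_vblank + wait_for_vblank.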
9165 */ 9166 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); 9167 } else { 9168 /* For variable refresh rate mode only: 9169 * Get vblank of last completed flip to avoid > 1 vrr 9170 * flips per video frame by use of throttling, but allow 9171 * flip programming anywhere in the possibly large 9172 * variable vrr vblank interval for fine-grained flip 9173 * timing control and more opportunity to avoid stutter 9174 * on late submission of flips. 9175 */ 9176 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9177 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; 9178 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9179 } 9180 9181 target_vblank = last_flip_vblank + wait_for_vblank; 9182 9183 /* 9184 * Wait until we're out of the vertical blank period before the one 9185 * targeted by the flip 9186 */ 9187 while ((acrtc_attach->enabled && 9188 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 9189 0, &vpos, &hpos, NULL, 9190 NULL, &pcrtc->hwmode) 9191 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 9192 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 9193 (int)(target_vblank - 9194 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { 9195 usleep_range(1000, 1100); 9196 } 9197 9198 /** 9199 * Prepare the flip event for the pageflip interrupt to handle. 9200 * 9201 * This only works in the case where we've already turned on the 9202 * appropriate hardware blocks (eg. HUBP) so in the transition case 9203 * from 0 -> n planes we have to skip a hardware generated event 9204 * and rely on sending it from software. 9205 */ 9206 if (acrtc_attach->base.state->event && 9207 acrtc_state->active_planes > 0) { 9208 drm_crtc_vblank_get(pcrtc); 9209 9210 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9211 9212 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); 9213 prepare_flip_isr(acrtc_attach); 9214 9215 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9216 } 9217 9218 if (acrtc_state->stream) { 9219 if (acrtc_state->freesync_vrr_info_changed) 9220 bundle->stream_update.vrr_infopacket = 9221 &acrtc_state->stream->vrr_infopacket; 9222 } 9223 } else if (cursor_update && acrtc_state->active_planes > 0) { 9224 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9225 if (acrtc_attach->base.state->event) { 9226 drm_crtc_vblank_get(pcrtc); 9227 acrtc_attach->event = acrtc_attach->base.state->event; 9228 acrtc_attach->base.state->event = NULL; 9229 } 9230 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9231 } 9232 9233 /* Update the planes if changed or disable if we don't have any. */ 9234 if ((planes_count || acrtc_state->active_planes == 0) && 9235 acrtc_state->stream) { 9236 /* 9237 * If PSR or idle optimizations are enabled then flush out 9238 * any pending work before hardware programming. 9239 */ 9240 if (dm->vblank_control_workqueue) 9241 flush_workqueue(dm->vblank_control_workqueue); 9242 9243 bundle->stream_update.stream = acrtc_state->stream; 9244 if (new_pcrtc_state->mode_changed) { 9245 bundle->stream_update.src = acrtc_state->stream->src; 9246 bundle->stream_update.dst = acrtc_state->stream->dst; 9247 } 9248 9249 if (new_pcrtc_state->color_mgmt_changed) { 9250 /* 9251 * TODO: This isn't fully correct since we've actually 9252 * already modified the stream in place. 
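* Note: the stream_update entries set below point into the live stream, so DC reads the already-updated values regardless of which flags are honoured.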
9253 */ 9254 bundle->stream_update.gamut_remap = 9255 &acrtc_state->stream->gamut_remap_matrix; 9256 bundle->stream_update.output_csc_transform = 9257 &acrtc_state->stream->csc_color_matrix; 9258 bundle->stream_update.out_transfer_func = 9259 &acrtc_state->stream->out_transfer_func; 9260 bundle->stream_update.lut3d_func = 9261 (struct dc_3dlut *) acrtc_state->stream->lut3d_func; 9262 bundle->stream_update.func_shaper = 9263 (struct dc_transfer_func *) acrtc_state->stream->func_shaper; 9264 } 9265 9266 acrtc_state->stream->abm_level = acrtc_state->abm_level; 9267 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) 9268 bundle->stream_update.abm_level = &acrtc_state->abm_level; 9269 9270 mutex_lock(&dm->dc_lock); 9271 if (acrtc_state->update_type > UPDATE_TYPE_FAST) { 9272 if (acrtc_state->stream->link->replay_settings.replay_allow_active) 9273 amdgpu_dm_replay_disable(acrtc_state->stream); 9274 if (acrtc_state->stream->link->psr_settings.psr_allow_active) 9275 amdgpu_dm_psr_disable(acrtc_state->stream); 9276 } 9277 mutex_unlock(&dm->dc_lock); 9278 9279 /* 9280 * If FreeSync state on the stream has changed then we need to 9281 * re-adjust the min/max bounds now that DC doesn't handle this 9282 * as part of commit. 9283 */ 9284 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { 9285 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9286 dc_stream_adjust_vmin_vmax( 9287 dm->dc, acrtc_state->stream, 9288 &acrtc_attach->dm_irq_params.vrr_params.adjust); 9289 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9290 } 9291 mutex_lock(&dm->dc_lock); 9292 update_planes_and_stream_adapter(dm->dc, 9293 acrtc_state->update_type, 9294 planes_count, 9295 acrtc_state->stream, 9296 &bundle->stream_update, 9297 bundle->surface_updates); 9298 updated_planes_and_streams = true; 9299 9300 /** 9301 * Enable or disable the interrupts on the backend. 9302 * 9303 * Most pipes are put into power gating when unused. 9304 * 9305 * When power gating is enabled on a pipe we lose the 9306 * interrupt enablement state when power gating is disabled. 9307 * 9308 * So we need to update the IRQ control state in hardware 9309 * whenever the pipe turns on (since it could be previously 9310 * power gated) or off (since some pipes can't be power gated 9311 * on some ASICs). 9312 */ 9313 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) 9314 dm_update_pflip_irq_state(drm_to_adev(dev), 9315 acrtc_attach); 9316 9317 amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns); 9318 mutex_unlock(&dm->dc_lock); 9319 } 9320 9321 /* 9322 * Update cursor state *after* programming all the planes. 9323 * This avoids redundant programming in the case where we're going 9324 * to be disabling a single plane - those pipes are being disabled. 
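* When the cursor was already programmed via amdgpu_dm_update_cursor() during the stream update above (updated_planes_and_streams with a non-zero DCE_HWIP version), this fallback is skipped.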
9325 */ 9326 if (acrtc_state->active_planes && 9327 (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) && 9328 acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) 9329 amdgpu_dm_commit_cursors(state); 9330 9331 cleanup: 9332 kfree(bundle); 9333 } 9334 9335 static void amdgpu_dm_commit_audio(struct drm_device *dev, 9336 struct drm_atomic_state *state) 9337 { 9338 struct amdgpu_device *adev = drm_to_adev(dev); 9339 struct amdgpu_dm_connector *aconnector; 9340 struct drm_connector *connector; 9341 struct drm_connector_state *old_con_state, *new_con_state; 9342 struct drm_crtc_state *new_crtc_state; 9343 struct dm_crtc_state *new_dm_crtc_state; 9344 const struct dc_stream_status *status; 9345 int i, inst; 9346 9347 /* Notify device removals. */ 9348 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9349 if (old_con_state->crtc != new_con_state->crtc) { 9350 /* CRTC changes require notification. */ 9351 goto notify; 9352 } 9353 9354 if (!new_con_state->crtc) 9355 continue; 9356 9357 new_crtc_state = drm_atomic_get_new_crtc_state( 9358 state, new_con_state->crtc); 9359 9360 if (!new_crtc_state) 9361 continue; 9362 9363 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9364 continue; 9365 9366 notify: 9367 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 9368 continue; 9369 9370 aconnector = to_amdgpu_dm_connector(connector); 9371 9372 mutex_lock(&adev->dm.audio_lock); 9373 inst = aconnector->audio_inst; 9374 aconnector->audio_inst = -1; 9375 mutex_unlock(&adev->dm.audio_lock); 9376 9377 amdgpu_dm_audio_eld_notify(adev, inst); 9378 } 9379 9380 /* Notify audio device additions. */ 9381 for_each_new_connector_in_state(state, connector, new_con_state, i) { 9382 if (!new_con_state->crtc) 9383 continue; 9384 9385 new_crtc_state = drm_atomic_get_new_crtc_state( 9386 state, new_con_state->crtc); 9387 9388 if (!new_crtc_state) 9389 continue; 9390 9391 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9392 continue; 9393 9394 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 9395 if (!new_dm_crtc_state->stream) 9396 continue; 9397 9398 status = dc_stream_get_status(new_dm_crtc_state->stream); 9399 if (!status) 9400 continue; 9401 9402 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 9403 continue; 9404 9405 aconnector = to_amdgpu_dm_connector(connector); 9406 9407 mutex_lock(&adev->dm.audio_lock); 9408 inst = status->audio_inst; 9409 aconnector->audio_inst = inst; 9410 mutex_unlock(&adev->dm.audio_lock); 9411 9412 amdgpu_dm_audio_eld_notify(adev, inst); 9413 } 9414 } 9415 9416 /* 9417 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC 9418 * @crtc_state: the DRM CRTC state 9419 * @stream_state: the DC stream state. 9420 * 9421 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring 9422 * a dc_stream_state's flags in sync with a drm_crtc_state's flags. 
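* Currently only the mode_changed flag is mirrored, derived from drm_atomic_crtc_needs_modeset().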
9423 */ 9424 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, 9425 struct dc_stream_state *stream_state) 9426 { 9427 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); 9428 } 9429 9430 static void dm_clear_writeback(struct amdgpu_display_manager *dm, 9431 struct dm_crtc_state *crtc_state) 9432 { 9433 dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0); 9434 } 9435 9436 static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, 9437 struct dc_state *dc_state) 9438 { 9439 struct drm_device *dev = state->dev; 9440 struct amdgpu_device *adev = drm_to_adev(dev); 9441 struct amdgpu_display_manager *dm = &adev->dm; 9442 struct drm_crtc *crtc; 9443 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 9444 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 9445 struct drm_connector_state *old_con_state; 9446 struct drm_connector *connector; 9447 bool mode_set_reset_required = false; 9448 u32 i; 9449 struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; 9450 bool set_backlight_level = false; 9451 9452 /* Disable writeback */ 9453 for_each_old_connector_in_state(state, connector, old_con_state, i) { 9454 struct dm_connector_state *dm_old_con_state; 9455 struct amdgpu_crtc *acrtc; 9456 9457 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 9458 continue; 9459 9460 old_crtc_state = NULL; 9461 9462 dm_old_con_state = to_dm_connector_state(old_con_state); 9463 if (!dm_old_con_state->base.crtc) 9464 continue; 9465 9466 acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc); 9467 if (acrtc) 9468 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9469 9470 if (!acrtc || !acrtc->wb_enabled) 9471 continue; 9472 9473 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9474 9475 dm_clear_writeback(dm, dm_old_crtc_state); 9476 acrtc->wb_enabled = false; 9477 } 9478 9479 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 9480 new_crtc_state, i) { 9481 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9482 9483 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9484 9485 if (old_crtc_state->active && 9486 (!new_crtc_state->active || 9487 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 9488 manage_dm_interrupts(adev, acrtc, NULL); 9489 dc_stream_release(dm_old_crtc_state->stream); 9490 } 9491 } 9492 9493 drm_atomic_helper_calc_timestamping_constants(state); 9494 9495 /* update changed items */ 9496 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 9497 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9498 9499 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9500 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9501 9502 drm_dbg_state(state->dev, 9503 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 9504 acrtc->crtc_id, 9505 new_crtc_state->enable, 9506 new_crtc_state->active, 9507 new_crtc_state->planes_changed, 9508 new_crtc_state->mode_changed, 9509 new_crtc_state->active_changed, 9510 new_crtc_state->connectors_changed); 9511 9512 /* Disable cursor if disabling crtc */ 9513 if (old_crtc_state->active && !new_crtc_state->active) { 9514 struct dc_cursor_position position; 9515 9516 memset(&position, 0, sizeof(position)); 9517 mutex_lock(&dm->dc_lock); 9518 dc_exit_ips_for_hw_access(dm->dc); 9519 dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position); 9520 mutex_unlock(&dm->dc_lock); 9521 } 9522 9523 /* Copy all 
transient state flags into dc state */ 9524 if (dm_new_crtc_state->stream) { 9525 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base, 9526 dm_new_crtc_state->stream); 9527 } 9528 9529 /* handles headless hotplug case, updating new_state and 9530 * aconnector as needed 9531 */ 9532 9533 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { 9534 9535 drm_dbg_atomic(dev, 9536 "Atomic commit: SET crtc id %d: [%p]\n", 9537 acrtc->crtc_id, acrtc); 9538 9539 if (!dm_new_crtc_state->stream) { 9540 /* 9541 * this could happen because of issues with the 9542 * delivery of userspace notifications. 9543 * In this case userspace tries to set a mode on 9544 * a display which is in fact disconnected. 9545 * dc_sink is NULL in this case on aconnector. 9546 * We expect a mode reset to come soon. 9547 * 9548 * This can also happen when an unplug is done 9549 * while the resume sequence is ending 9550 * 9551 * In this case, we want to pretend we still 9552 * have a sink to keep the pipe running so that 9553 * hw state is consistent with the sw state 9554 */ 9555 drm_dbg_atomic(dev, 9556 "Failed to create new stream for crtc %d\n", 9557 acrtc->base.base.id); 9558 continue; 9559 } 9560 9561 if (dm_old_crtc_state->stream) 9562 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 9563 9564 pm_runtime_get_noresume(dev->dev); 9565 9566 acrtc->enabled = true; 9567 acrtc->hw_mode = new_crtc_state->mode; 9568 crtc->hwmode = new_crtc_state->mode; 9569 mode_set_reset_required = true; 9570 set_backlight_level = true; 9571 } else if (modereset_required(new_crtc_state)) { 9572 drm_dbg_atomic(dev, 9573 "Atomic commit: RESET. crtc id %d:[%p]\n", 9574 acrtc->crtc_id, acrtc); 9575 /* i.e. reset mode */ 9576 if (dm_old_crtc_state->stream) 9577 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 9578 9579 mode_set_reset_required = true; 9580 } 9581 } /* for_each_crtc_in_state() */ 9582 9583 /* If there was a mode set or reset, disable eDP PSR and Replay */ 9584 if (mode_set_reset_required) { 9585 if (dm->vblank_control_workqueue) 9586 flush_workqueue(dm->vblank_control_workqueue); 9587 9588 amdgpu_dm_replay_disable_all(dm); 9589 amdgpu_dm_psr_disable_all(dm); 9590 } 9591 9592 dm_enable_per_frame_crtc_master_sync(dc_state); 9593 mutex_lock(&dm->dc_lock); 9594 dc_exit_ips_for_hw_access(dm->dc); 9595 WARN_ON(!dc_commit_streams(dm->dc, &params)); 9596 9597 /* Allow idle optimization when vblank count is 0 for display off */ 9598 if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev)) 9599 dc_allow_idle_optimizations(dm->dc, true); 9600 mutex_unlock(&dm->dc_lock); 9601 9602 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 9603 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9604 9605 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9606 9607 if (dm_new_crtc_state->stream != NULL) { 9608 const struct dc_stream_status *status = 9609 dc_stream_get_status(dm_new_crtc_state->stream); 9610 9611 if (!status) 9612 status = dc_state_get_stream_status(dc_state, 9613 dm_new_crtc_state->stream); 9614 if (!status) 9615 drm_err(dev, 9616 "got no status for stream %p on acrtc%p\n", 9617 dm_new_crtc_state->stream, acrtc); 9618 else 9619 acrtc->otg_inst = status->primary_otg_inst; 9620 } 9621 } 9622 9623 /* During boot up and resume the DC layer will reset the panel brightness 9624 * to fix a flicker issue. 9625 * It will cause dm->actual_brightness to not match the current panel brightness 9626 * level.
(the dm->brightness is the correct panel level) 9627 * So we set the backlight level with dm->brightness value after set mode 9628 */ 9629 if (set_backlight_level) { 9630 for (i = 0; i < dm->num_of_edps; i++) { 9631 if (dm->backlight_dev[i]) 9632 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 9633 } 9634 } 9635 } 9636 9637 static void dm_set_writeback(struct amdgpu_display_manager *dm, 9638 struct dm_crtc_state *crtc_state, 9639 struct drm_connector *connector, 9640 struct drm_connector_state *new_con_state) 9641 { 9642 struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector); 9643 struct amdgpu_device *adev = dm->adev; 9644 struct amdgpu_crtc *acrtc; 9645 struct dc_writeback_info *wb_info; 9646 struct pipe_ctx *pipe = NULL; 9647 struct amdgpu_framebuffer *afb; 9648 int i = 0; 9649 9650 wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL); 9651 if (!wb_info) { 9652 DRM_ERROR("Failed to allocate wb_info\n"); 9653 return; 9654 } 9655 9656 acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc); 9657 if (!acrtc) { 9658 DRM_ERROR("no amdgpu_crtc found\n"); 9659 kfree(wb_info); 9660 return; 9661 } 9662 9663 afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb); 9664 if (!afb) { 9665 DRM_ERROR("No amdgpu_framebuffer found\n"); 9666 kfree(wb_info); 9667 return; 9668 } 9669 9670 for (i = 0; i < MAX_PIPES; i++) { 9671 if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) { 9672 pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i]; 9673 break; 9674 } 9675 } 9676 9677 /* fill in wb_info */ 9678 wb_info->wb_enabled = true; 9679 9680 wb_info->dwb_pipe_inst = 0; 9681 wb_info->dwb_params.dwbscl_black_color = 0; 9682 wb_info->dwb_params.hdr_mult = 0x1F000; 9683 wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS; 9684 wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13; 9685 wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC; 9686 wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC; 9687 9688 /* width & height from crtc */ 9689 wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay; 9690 wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay; 9691 wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay; 9692 wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay; 9693 9694 wb_info->dwb_params.cnv_params.crop_en = false; 9695 wb_info->dwb_params.stereo_params.stereo_enabled = false; 9696 9697 wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits 9698 wb_info->dwb_params.cnv_params.out_min_pix_val = 0; 9699 wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB; 9700 wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS; 9701 9702 wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444; 9703 9704 wb_info->dwb_params.capture_rate = dwb_capture_rate_0; 9705 9706 wb_info->dwb_params.scaler_taps.h_taps = 4; 9707 wb_info->dwb_params.scaler_taps.v_taps = 4; 9708 wb_info->dwb_params.scaler_taps.h_taps_c = 2; 9709 wb_info->dwb_params.scaler_taps.v_taps_c = 2; 9710 wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING; 9711 9712 wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0]; 9713 wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1]; 9714 9715 for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) { 9716 wb_info->mcif_buf_params.luma_address[i] = afb->address; 9717 wb_info->mcif_buf_params.chroma_address[i] = 0; 9718 } 9719 9720 
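/* Note: every MCIF buffer slot above was pointed at the same framebuffer address, so the writeback engine rewrites a single surface rather than cycling through distinct buffers. */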
wb_info->mcif_buf_params.p_vmid = 1; 9721 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) { 9722 wb_info->mcif_warmup_params.start_address.quad_part = afb->address; 9723 wb_info->mcif_warmup_params.region_size = 9724 wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height; 9725 } 9726 wb_info->mcif_warmup_params.p_vmid = 1; 9727 wb_info->writeback_source_plane = pipe->plane_state; 9728 9729 dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info); 9730 9731 acrtc->wb_pending = true; 9732 acrtc->wb_conn = wb_conn; 9733 drm_writeback_queue_job(wb_conn, new_con_state); 9734 } 9735 9736 /** 9737 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. 9738 * @state: The atomic state to commit 9739 * 9740 * This will tell DC to commit the constructed DC state from atomic_check, 9741 * programming the hardware. Any failures here implies a hardware failure, since 9742 * atomic check should have filtered anything non-kosher. 9743 */ 9744 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) 9745 { 9746 struct drm_device *dev = state->dev; 9747 struct amdgpu_device *adev = drm_to_adev(dev); 9748 struct amdgpu_display_manager *dm = &adev->dm; 9749 struct dm_atomic_state *dm_state; 9750 struct dc_state *dc_state = NULL; 9751 u32 i, j; 9752 struct drm_crtc *crtc; 9753 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 9754 unsigned long flags; 9755 bool wait_for_vblank = true; 9756 struct drm_connector *connector; 9757 struct drm_connector_state *old_con_state, *new_con_state; 9758 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 9759 int crtc_disable_count = 0; 9760 9761 trace_amdgpu_dm_atomic_commit_tail_begin(state); 9762 9763 drm_atomic_helper_update_legacy_modeset_state(dev, state); 9764 drm_dp_mst_atomic_wait_for_dependencies(state); 9765 9766 dm_state = dm_atomic_get_new_state(state); 9767 if (dm_state && dm_state->context) { 9768 dc_state = dm_state->context; 9769 amdgpu_dm_commit_streams(state, dc_state); 9770 } 9771 9772 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9773 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9774 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9775 struct amdgpu_dm_connector *aconnector; 9776 9777 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 9778 continue; 9779 9780 aconnector = to_amdgpu_dm_connector(connector); 9781 9782 if (!adev->dm.hdcp_workqueue) 9783 continue; 9784 9785 pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); 9786 9787 if (!connector) 9788 continue; 9789 9790 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 9791 connector->index, connector->status, connector->dpms); 9792 pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 9793 old_con_state->content_protection, new_con_state->content_protection); 9794 9795 if (aconnector->dc_sink) { 9796 if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && 9797 aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { 9798 pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", 9799 aconnector->dc_sink->edid_caps.display_name); 9800 } 9801 } 9802 9803 new_crtc_state = NULL; 9804 old_crtc_state = NULL; 9805 9806 if (acrtc) { 9807 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 9808 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9809 } 9810 9811 if (old_crtc_state) 9812 pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 
9813 old_crtc_state->enable, 9814 old_crtc_state->active, 9815 old_crtc_state->mode_changed, 9816 old_crtc_state->active_changed, 9817 old_crtc_state->connectors_changed); 9818 9819 if (new_crtc_state) 9820 pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 9821 new_crtc_state->enable, 9822 new_crtc_state->active, 9823 new_crtc_state->mode_changed, 9824 new_crtc_state->active_changed, 9825 new_crtc_state->connectors_changed); 9826 } 9827 9828 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9829 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9830 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9831 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 9832 9833 if (!adev->dm.hdcp_workqueue) 9834 continue; 9835 9836 new_crtc_state = NULL; 9837 old_crtc_state = NULL; 9838 9839 if (acrtc) { 9840 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 9841 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9842 } 9843 9844 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9845 9846 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && 9847 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 9848 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 9849 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 9850 dm_new_con_state->update_hdcp = true; 9851 continue; 9852 } 9853 9854 if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state, 9855 old_con_state, connector, adev->dm.hdcp_workqueue)) { 9856 /* when a display is unplugged from the MST hub, the connector will 9857 * be destroyed within dm_dp_mst_connector_destroy. The connector's 9858 * hdcp properties, like type, undesired, desired, enabled, 9859 * will be lost. So, save the hdcp properties into hdcp_work within 9860 * amdgpu_dm_atomic_commit_tail.
if the same display is 9861 * plugged back with same display index, its hdcp properties 9862 * will be retrieved from hdcp_work within dm_dp_mst_get_modes 9863 */ 9864 9865 bool enable_encryption = false; 9866 9867 if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) 9868 enable_encryption = true; 9869 9870 if (aconnector->dc_link && aconnector->dc_sink && 9871 aconnector->dc_link->type == dc_connection_mst_branch) { 9872 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; 9873 struct hdcp_workqueue *hdcp_w = 9874 &hdcp_work[aconnector->dc_link->link_index]; 9875 9876 hdcp_w->hdcp_content_type[connector->index] = 9877 new_con_state->hdcp_content_type; 9878 hdcp_w->content_protection[connector->index] = 9879 new_con_state->content_protection; 9880 } 9881 9882 if (new_crtc_state && new_crtc_state->mode_changed && 9883 new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) 9884 enable_encryption = true; 9885 9886 DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); 9887 9888 if (aconnector->dc_link) 9889 hdcp_update_display( 9890 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, 9891 new_con_state->hdcp_content_type, enable_encryption); 9892 } 9893 } 9894 9895 /* Handle connector state changes */ 9896 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9897 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9898 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 9899 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9900 struct dc_surface_update *dummy_updates; 9901 struct dc_stream_update stream_update; 9902 struct dc_info_packet hdr_packet; 9903 struct dc_stream_status *status = NULL; 9904 bool abm_changed, hdr_changed, scaling_changed; 9905 9906 memset(&stream_update, 0, sizeof(stream_update)); 9907 9908 if (acrtc) { 9909 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 9910 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9911 } 9912 9913 /* Skip any modesets/resets */ 9914 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) 9915 continue; 9916 9917 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9918 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9919 9920 scaling_changed = is_scaling_state_different(dm_new_con_state, 9921 dm_old_con_state); 9922 9923 abm_changed = dm_new_crtc_state->abm_level != 9924 dm_old_crtc_state->abm_level; 9925 9926 hdr_changed = 9927 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state); 9928 9929 if (!scaling_changed && !abm_changed && !hdr_changed) 9930 continue; 9931 9932 stream_update.stream = dm_new_crtc_state->stream; 9933 if (scaling_changed) { 9934 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, 9935 dm_new_con_state, dm_new_crtc_state->stream); 9936 9937 stream_update.src = dm_new_crtc_state->stream->src; 9938 stream_update.dst = dm_new_crtc_state->stream->dst; 9939 } 9940 9941 if (abm_changed) { 9942 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; 9943 9944 stream_update.abm_level = &dm_new_crtc_state->abm_level; 9945 } 9946 9947 if (hdr_changed) { 9948 fill_hdr_info_packet(new_con_state, &hdr_packet); 9949 stream_update.hdr_static_metadata = &hdr_packet; 9950 } 9951 9952 status = dc_stream_get_status(dm_new_crtc_state->stream); 9953 9954 if (WARN_ON(!status)) 9955 continue; 9956 9957 
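/* An active stream is expected to have at least one plane here; the dummy surface updates below are sized by plane_count. */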
WARN_ON(!status->plane_count); 9958 9959 /* 9960 * TODO: DC refuses to perform stream updates without a dc_surface_update. 9961 * Here we create an empty update on each plane. 9962 * To fix this, DC should permit updating only stream properties. 9963 */ 9964 dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); 9965 if (!dummy_updates) { 9966 DRM_ERROR("Failed to allocate memory for dummy_updates.\n"); 9967 continue; 9968 } 9969 for (j = 0; j < status->plane_count; j++) 9970 dummy_updates[j].surface = status->plane_states[0]; 9971 9972 sort(dummy_updates, status->plane_count, 9973 sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL); 9974 9975 mutex_lock(&dm->dc_lock); 9976 dc_exit_ips_for_hw_access(dm->dc); 9977 dc_update_planes_and_stream(dm->dc, 9978 dummy_updates, 9979 status->plane_count, 9980 dm_new_crtc_state->stream, 9981 &stream_update); 9982 mutex_unlock(&dm->dc_lock); 9983 kfree(dummy_updates); 9984 } 9985 9986 /** 9987 * Enable interrupts for CRTCs that are newly enabled or went through 9988 * a modeset. It was intentionally deferred until after the front end 9989 * state was modified to wait until the OTG was on and so the IRQ 9990 * handlers didn't access stale or invalid state. 9991 */ 9992 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 9993 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9994 #ifdef CONFIG_DEBUG_FS 9995 enum amdgpu_dm_pipe_crc_source cur_crc_src; 9996 #endif 9997 /* Count number of newly disabled CRTCs for dropping PM refs later. */ 9998 if (old_crtc_state->active && !new_crtc_state->active) 9999 crtc_disable_count++; 10000 10001 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10002 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10003 10004 /* For freesync config update on crtc state and params for irq */ 10005 update_stream_irq_parameters(dm, dm_new_crtc_state); 10006 10007 #ifdef CONFIG_DEBUG_FS 10008 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10009 cur_crc_src = acrtc->dm_irq_params.crc_src; 10010 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10011 #endif 10012 10013 if (new_crtc_state->active && 10014 (!old_crtc_state->active || 10015 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 10016 dc_stream_retain(dm_new_crtc_state->stream); 10017 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; 10018 manage_dm_interrupts(adev, acrtc, dm_new_crtc_state); 10019 } 10020 /* Handle vrr on->off / off->on transitions */ 10021 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); 10022 10023 #ifdef CONFIG_DEBUG_FS 10024 if (new_crtc_state->active && 10025 (!old_crtc_state->active || 10026 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 10027 /** 10028 * Frontend may have changed so reapply the CRC capture 10029 * settings for the stream. 10030 */ 10031 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { 10032 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 10033 if (amdgpu_dm_crc_window_is_activated(crtc)) { 10034 uint8_t cnt; 10035 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10036 for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) { 10037 if (acrtc->dm_irq_params.window_param[cnt].enable) { 10038 acrtc->dm_irq_params.window_param[cnt].update_win = true; 10039 10040 /** 10041 * It takes 2 frames for HW to stably generate CRC when 10042 * resuming from suspend, so we set skip_frame_cnt 2. 
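* The first two frames after the window update are therefore skipped before the generated CRCs are used.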
10043 */ 10044 acrtc->dm_irq_params.window_param[cnt].skip_frame_cnt = 2; 10045 } 10046 } 10047 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10048 } 10049 #endif 10050 if (amdgpu_dm_crtc_configure_crc_source( 10051 crtc, dm_new_crtc_state, cur_crc_src)) 10052 drm_dbg_atomic(dev, "Failed to configure crc source"); 10053 } 10054 } 10055 #endif 10056 } 10057 10058 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) 10059 if (new_crtc_state->async_flip) 10060 wait_for_vblank = false; 10061 10062 /* update planes when needed per crtc*/ 10063 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { 10064 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10065 10066 if (dm_new_crtc_state->stream) 10067 amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); 10068 } 10069 10070 /* Enable writeback */ 10071 for_each_new_connector_in_state(state, connector, new_con_state, i) { 10072 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 10073 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 10074 10075 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 10076 continue; 10077 10078 if (!new_con_state->writeback_job) 10079 continue; 10080 10081 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 10082 10083 if (!new_crtc_state) 10084 continue; 10085 10086 if (acrtc->wb_enabled) 10087 continue; 10088 10089 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10090 10091 dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state); 10092 acrtc->wb_enabled = true; 10093 } 10094 10095 /* Update audio instances for each connector. */ 10096 amdgpu_dm_commit_audio(dev, state); 10097 10098 /* restore the backlight level */ 10099 for (i = 0; i < dm->num_of_edps; i++) { 10100 if (dm->backlight_dev[i] && 10101 (dm->actual_brightness[i] != dm->brightness[i])) 10102 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 10103 } 10104 10105 /* 10106 * send vblank event on all events not handled in flip and 10107 * mark consumed event for drm_atomic_helper_commit_hw_done 10108 */ 10109 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10110 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 10111 10112 if (new_crtc_state->event) 10113 drm_send_event_locked(dev, &new_crtc_state->event->base); 10114 10115 new_crtc_state->event = NULL; 10116 } 10117 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10118 10119 /* Signal HW programming completion */ 10120 drm_atomic_helper_commit_hw_done(state); 10121 10122 if (wait_for_vblank) 10123 drm_atomic_helper_wait_for_flip_done(dev, state); 10124 10125 drm_atomic_helper_cleanup_planes(dev, state); 10126 10127 /* Don't free the memory if we are hitting this as part of suspend. 10128 * This way we don't free any memory during suspend; see 10129 * amdgpu_bo_free_kernel(). The memory will be freed in the first 10130 * non-suspend modeset or when the driver is torn down. 
10131 */ 10132 if (!adev->in_suspend) { 10133 /* return the stolen vga memory back to VRAM */ 10134 if (!adev->mman.keep_stolen_vga_memory) 10135 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); 10136 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); 10137 } 10138 10139 /* 10140 * Finally, drop a runtime PM reference for each newly disabled CRTC, 10141 * so we can put the GPU into runtime suspend if we're not driving any 10142 * displays anymore 10143 */ 10144 for (i = 0; i < crtc_disable_count; i++) 10145 pm_runtime_put_autosuspend(dev->dev); 10146 pm_runtime_mark_last_busy(dev->dev); 10147 10148 trace_amdgpu_dm_atomic_commit_tail_finish(state); 10149 } 10150 10151 static int dm_force_atomic_commit(struct drm_connector *connector) 10152 { 10153 int ret = 0; 10154 struct drm_device *ddev = connector->dev; 10155 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); 10156 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 10157 struct drm_plane *plane = disconnected_acrtc->base.primary; 10158 struct drm_connector_state *conn_state; 10159 struct drm_crtc_state *crtc_state; 10160 struct drm_plane_state *plane_state; 10161 10162 if (!state) 10163 return -ENOMEM; 10164 10165 state->acquire_ctx = ddev->mode_config.acquire_ctx; 10166 10167 /* Construct an atomic state to restore previous display setting */ 10168 10169 /* 10170 * Attach connectors to drm_atomic_state 10171 */ 10172 conn_state = drm_atomic_get_connector_state(state, connector); 10173 10174 ret = PTR_ERR_OR_ZERO(conn_state); 10175 if (ret) 10176 goto out; 10177 10178 /* Attach crtc to drm_atomic_state*/ 10179 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); 10180 10181 ret = PTR_ERR_OR_ZERO(crtc_state); 10182 if (ret) 10183 goto out; 10184 10185 /* force a restore */ 10186 crtc_state->mode_changed = true; 10187 10188 /* Attach plane to drm_atomic_state */ 10189 plane_state = drm_atomic_get_plane_state(state, plane); 10190 10191 ret = PTR_ERR_OR_ZERO(plane_state); 10192 if (ret) 10193 goto out; 10194 10195 /* Call commit internally with the state we just constructed */ 10196 ret = drm_atomic_commit(state); 10197 10198 out: 10199 drm_atomic_state_put(state); 10200 if (ret) 10201 DRM_ERROR("Restoring old state failed with %i\n", ret); 10202 10203 return ret; 10204 } 10205 10206 /* 10207 * This function handles all cases when set mode does not come upon hotplug. 
10208 * This includes when a display is unplugged and then plugged back into the 10209 * same port, and when running without usermode desktop manager support 10210 */ 10211 void dm_restore_drm_connector_state(struct drm_device *dev, 10212 struct drm_connector *connector) 10213 { 10214 struct amdgpu_dm_connector *aconnector; 10215 struct amdgpu_crtc *disconnected_acrtc; 10216 struct dm_crtc_state *acrtc_state; 10217 10218 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 10219 return; 10220 10221 aconnector = to_amdgpu_dm_connector(connector); 10222 10223 if (!aconnector->dc_sink || !connector->state || !connector->encoder) 10224 return; 10225 10226 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 10227 if (!disconnected_acrtc) 10228 return; 10229 10230 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state); 10231 if (!acrtc_state->stream) 10232 return; 10233 10234 /* 10235 * If the previous sink is not released and is different from the current one, 10236 * we deduce we are in a state where we cannot rely on a usermode call 10237 * to turn on the display, so we do it here 10238 */ 10239 if (acrtc_state->stream->sink != aconnector->dc_sink) 10240 dm_force_atomic_commit(&aconnector->base); 10241 } 10242 10243 /* 10244 * Grabs all modesetting locks to serialize against any blocking commits, 10245 * and waits for completion of all non-blocking commits. 10246 */ 10247 static int do_aquire_global_lock(struct drm_device *dev, 10248 struct drm_atomic_state *state) 10249 { 10250 struct drm_crtc *crtc; 10251 struct drm_crtc_commit *commit; 10252 long ret; 10253 10254 /* 10255 * Adding all modeset locks to acquire_ctx will 10256 * ensure that when the framework releases it, the 10257 * extra locks we are locking here will get released too 10258 */ 10259 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); 10260 if (ret) 10261 return ret; 10262 10263 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 10264 spin_lock(&crtc->commit_lock); 10265 commit = list_first_entry_or_null(&crtc->commit_list, 10266 struct drm_crtc_commit, commit_entry); 10267 if (commit) 10268 drm_crtc_commit_get(commit); 10269 spin_unlock(&crtc->commit_lock); 10270 10271 if (!commit) 10272 continue; 10273 10274 /* 10275 * Make sure all pending HW programming is completed and 10276 * page flips are done 10277 */ 10278 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ); 10279 10280 if (ret > 0) 10281 ret = wait_for_completion_interruptible_timeout( 10282 &commit->flip_done, 10*HZ); 10283 10284 if (ret == 0) 10285 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n", 10286 crtc->base.id, crtc->name); 10287 10288 drm_crtc_commit_put(commit); 10289 } 10290 10291 return ret < 0 ?
ret : 0; 10292 } 10293 10294 static void get_freesync_config_for_crtc( 10295 struct dm_crtc_state *new_crtc_state, 10296 struct dm_connector_state *new_con_state) 10297 { 10298 struct mod_freesync_config config = {0}; 10299 struct amdgpu_dm_connector *aconnector; 10300 struct drm_display_mode *mode = &new_crtc_state->base.mode; 10301 int vrefresh = drm_mode_vrefresh(mode); 10302 bool fs_vid_mode = false; 10303 10304 if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 10305 return; 10306 10307 aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); 10308 10309 new_crtc_state->vrr_supported = new_con_state->freesync_capable && 10310 vrefresh >= aconnector->min_vfreq && 10311 vrefresh <= aconnector->max_vfreq; 10312 10313 if (new_crtc_state->vrr_supported) { 10314 new_crtc_state->stream->ignore_msa_timing_param = true; 10315 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; 10316 10317 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000; 10318 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; 10319 config.vsif_supported = true; 10320 config.btr = true; 10321 10322 if (fs_vid_mode) { 10323 config.state = VRR_STATE_ACTIVE_FIXED; 10324 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz; 10325 goto out; 10326 } else if (new_crtc_state->base.vrr_enabled) { 10327 config.state = VRR_STATE_ACTIVE_VARIABLE; 10328 } else { 10329 config.state = VRR_STATE_INACTIVE; 10330 } 10331 } 10332 out: 10333 new_crtc_state->freesync_config = config; 10334 } 10335 10336 static void reset_freesync_config_for_crtc( 10337 struct dm_crtc_state *new_crtc_state) 10338 { 10339 new_crtc_state->vrr_supported = false; 10340 10341 memset(&new_crtc_state->vrr_infopacket, 0, 10342 sizeof(new_crtc_state->vrr_infopacket)); 10343 } 10344 10345 static bool 10346 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, 10347 struct drm_crtc_state *new_crtc_state) 10348 { 10349 const struct drm_display_mode *old_mode, *new_mode; 10350 10351 if (!old_crtc_state || !new_crtc_state) 10352 return false; 10353 10354 old_mode = &old_crtc_state->mode; 10355 new_mode = &new_crtc_state->mode; 10356 10357 if (old_mode->clock == new_mode->clock && 10358 old_mode->hdisplay == new_mode->hdisplay && 10359 old_mode->vdisplay == new_mode->vdisplay && 10360 old_mode->htotal == new_mode->htotal && 10361 old_mode->vtotal != new_mode->vtotal && 10362 old_mode->hsync_start == new_mode->hsync_start && 10363 old_mode->vsync_start != new_mode->vsync_start && 10364 old_mode->hsync_end == new_mode->hsync_end && 10365 old_mode->vsync_end != new_mode->vsync_end && 10366 old_mode->hskew == new_mode->hskew && 10367 old_mode->vscan == new_mode->vscan && 10368 (old_mode->vsync_end - old_mode->vsync_start) == 10369 (new_mode->vsync_end - new_mode->vsync_start)) 10370 return true; 10371 10372 return false; 10373 } 10374 10375 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) 10376 { 10377 u64 num, den, res; 10378 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; 10379 10380 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; 10381 10382 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000; 10383 den = (unsigned long long)new_crtc_state->mode.htotal * 10384 (unsigned long long)new_crtc_state->mode.vtotal; 10385 10386 res = div_u64(num, den); 10387 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res; 10388 } 10389 10390 static int 
dm_update_crtc_state(struct amdgpu_display_manager *dm, 10391 struct drm_atomic_state *state, 10392 struct drm_crtc *crtc, 10393 struct drm_crtc_state *old_crtc_state, 10394 struct drm_crtc_state *new_crtc_state, 10395 bool enable, 10396 bool *lock_and_validation_needed) 10397 { 10398 struct dm_atomic_state *dm_state = NULL; 10399 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 10400 struct dc_stream_state *new_stream; 10401 int ret = 0; 10402 10403 /* 10404 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set 10405 * update changed items 10406 */ 10407 struct amdgpu_crtc *acrtc = NULL; 10408 struct drm_connector *connector = NULL; 10409 struct amdgpu_dm_connector *aconnector = NULL; 10410 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; 10411 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; 10412 10413 new_stream = NULL; 10414 10415 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10416 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10417 acrtc = to_amdgpu_crtc(crtc); 10418 connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 10419 if (connector) 10420 aconnector = to_amdgpu_dm_connector(connector); 10421 10422 /* TODO This hack should go away */ 10423 if (connector && enable) { 10424 /* Make sure fake sink is created in plug-in scenario */ 10425 drm_new_conn_state = drm_atomic_get_new_connector_state(state, 10426 connector); 10427 drm_old_conn_state = drm_atomic_get_old_connector_state(state, 10428 connector); 10429 10430 if (IS_ERR(drm_new_conn_state)) { 10431 ret = PTR_ERR_OR_ZERO(drm_new_conn_state); 10432 goto fail; 10433 } 10434 10435 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); 10436 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); 10437 10438 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10439 goto skip_modeset; 10440 10441 new_stream = create_validate_stream_for_sink(aconnector, 10442 &new_crtc_state->mode, 10443 dm_new_conn_state, 10444 dm_old_crtc_state->stream); 10445 10446 /* 10447 * we can have no stream on ACTION_SET if a display 10448 * was disconnected during S3, in this case it is not an 10449 * error, the OS will be updated after detection, and 10450 * will do the right thing on next atomic commit 10451 */ 10452 10453 if (!new_stream) { 10454 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 10455 __func__, acrtc->base.base.id); 10456 ret = -ENOMEM; 10457 goto fail; 10458 } 10459 10460 /* 10461 * TODO: Check VSDB bits to decide whether this should 10462 * be enabled or not. 10463 */ 10464 new_stream->triggered_crtc_reset.enabled = 10465 dm->force_timing_sync; 10466 10467 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 10468 10469 ret = fill_hdr_info_packet(drm_new_conn_state, 10470 &new_stream->hdr_static_metadata); 10471 if (ret) 10472 goto fail; 10473 10474 /* 10475 * If we already removed the old stream from the context 10476 * (and set the new stream to NULL) then we can't reuse 10477 * the old stream even if the stream and scaling are unchanged. 10478 * We'll hit the BUG_ON and black screen. 10479 * 10480 * TODO: Refactor this function to allow this check to work 10481 * in all conditions. 
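* (This is why the freesync fixed-timing check below is additionally gated on dm_new_crtc_state->stream being non-NULL.)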
10482 */ 10483 if (amdgpu_freesync_vid_mode && 10484 dm_new_crtc_state->stream && 10485 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) 10486 goto skip_modeset; 10487 10488 if (dm_new_crtc_state->stream && 10489 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 10490 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 10491 new_crtc_state->mode_changed = false; 10492 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", 10493 new_crtc_state->mode_changed); 10494 } 10495 } 10496 10497 /* mode_changed flag may get updated above, need to check again */ 10498 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10499 goto skip_modeset; 10500 10501 drm_dbg_state(state->dev, 10502 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 10503 acrtc->crtc_id, 10504 new_crtc_state->enable, 10505 new_crtc_state->active, 10506 new_crtc_state->planes_changed, 10507 new_crtc_state->mode_changed, 10508 new_crtc_state->active_changed, 10509 new_crtc_state->connectors_changed); 10510 10511 /* Remove stream for any changed/disabled CRTC */ 10512 if (!enable) { 10513 10514 if (!dm_old_crtc_state->stream) 10515 goto skip_modeset; 10516 10517 /* Unset freesync video if it was active before */ 10518 if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { 10519 dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; 10520 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; 10521 } 10522 10523 /* Now check if we should set freesync video mode */ 10524 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && 10525 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 10526 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && 10527 is_timing_unchanged_for_freesync(new_crtc_state, 10528 old_crtc_state)) { 10529 new_crtc_state->mode_changed = false; 10530 DRM_DEBUG_DRIVER( 10531 "Mode change not required for front porch change, setting mode_changed to %d", 10532 new_crtc_state->mode_changed); 10533 10534 set_freesync_fixed_config(dm_new_crtc_state); 10535 10536 goto skip_modeset; 10537 } else if (amdgpu_freesync_vid_mode && aconnector && 10538 is_freesync_video_mode(&new_crtc_state->mode, 10539 aconnector)) { 10540 struct drm_display_mode *high_mode; 10541 10542 high_mode = get_highest_refresh_rate_mode(aconnector, false); 10543 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) 10544 set_freesync_fixed_config(dm_new_crtc_state); 10545 } 10546 10547 ret = dm_atomic_get_state(state, &dm_state); 10548 if (ret) 10549 goto fail; 10550 10551 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", 10552 crtc->base.id); 10553 10554 /* i.e. 
reset mode */
10555 if (dc_state_remove_stream(
10556 dm->dc,
10557 dm_state->context,
10558 dm_old_crtc_state->stream) != DC_OK) {
10559 ret = -EINVAL;
10560 goto fail;
10561 }
10562
10563 dc_stream_release(dm_old_crtc_state->stream);
10564 dm_new_crtc_state->stream = NULL;
10565
10566 reset_freesync_config_for_crtc(dm_new_crtc_state);
10567
10568 *lock_and_validation_needed = true;
10569
10570 } else { /* Add stream for any updated/enabled CRTC */
10571 /*
10572 * Quick fix to prevent a NULL pointer dereference on new_stream when
10573 * MST connectors added in chained mode are not found in the existing crtc_state.
10574 * TODO: need to dig out the root cause of that
10575 */
10576 if (!connector)
10577 goto skip_modeset;
10578
10579 if (modereset_required(new_crtc_state))
10580 goto skip_modeset;
10581
10582 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
10583 dm_old_crtc_state->stream)) {
10584
10585 WARN_ON(dm_new_crtc_state->stream);
10586
10587 ret = dm_atomic_get_state(state, &dm_state);
10588 if (ret)
10589 goto fail;
10590
10591 dm_new_crtc_state->stream = new_stream;
10592
10593 dc_stream_retain(new_stream);
10594
10595 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10596 crtc->base.id);
10597
10598 if (dc_state_add_stream(
10599 dm->dc,
10600 dm_state->context,
10601 dm_new_crtc_state->stream) != DC_OK) {
10602 ret = -EINVAL;
10603 goto fail;
10604 }
10605
10606 *lock_and_validation_needed = true;
10607 }
10608 }
10609
10610 skip_modeset:
10611 /* Release extra reference */
10612 if (new_stream)
10613 dc_stream_release(new_stream);
10614
10615 /*
10616 * We want to do dc stream updates that do not require a
10617 * full modeset below.
10618 */
10619 if (!(enable && connector && new_crtc_state->active))
10620 return 0;
10621 /*
10622 * Given the above conditions, the dc state cannot be NULL because:
10623 * 1. We're in the process of enabling CRTCs (the stream has just been
10624 * added to the dc context, or is already in the context)
10625 * 2. Has a valid connector attached, and
10626 * 3. Is currently active and enabled.
10627 * => The dc stream state currently exists.
10628 */
10629 BUG_ON(dm_new_crtc_state->stream == NULL);
10630
10631 /* Scaling or underscan settings */
10632 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10633 drm_atomic_crtc_needs_modeset(new_crtc_state))
10634 update_stream_scaling_settings(
10635 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10636
10637 /* ABM settings */
10638 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10639
10640 /*
10641 * Color management settings. We also update color properties
10642 * when a modeset is needed, to ensure it gets reprogrammed.
10643 */
10644 if (dm_new_crtc_state->base.color_mgmt_changed ||
10645 dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
10646 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10647 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10648 if (ret)
10649 goto fail;
10650 }
10651
10652 /* Update Freesync settings.
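 * The freesync config is computed from the connector's VRR state and is
 * only stored on the CRTC state here; programming it to the hardware
 * happens later, at commit time.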
*/
10653 get_freesync_config_for_crtc(dm_new_crtc_state,
10654 dm_new_conn_state);
10655
10656 return ret;
10657
10658 fail:
10659 if (new_stream)
10660 dc_stream_release(new_stream);
10661 return ret;
10662 }
10663
10664 static bool should_reset_plane(struct drm_atomic_state *state,
10665 struct drm_plane *plane,
10666 struct drm_plane_state *old_plane_state,
10667 struct drm_plane_state *new_plane_state)
10668 {
10669 struct drm_plane *other;
10670 struct drm_plane_state *old_other_state, *new_other_state;
10671 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10672 struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state;
10673 struct amdgpu_device *adev = drm_to_adev(plane->dev);
10674 int i;
10675
10676 /*
10677 * TODO: Remove this hack for all asics once it proves that the
10678 * fast updates work fine on DCN3.2+.
10679 */
10680 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) &&
10681 state->allow_modeset)
10682 return true;
10683
10684 /* Exit early if we know that we're adding or removing the plane. */
10685 if (old_plane_state->crtc != new_plane_state->crtc)
10686 return true;
10687
10688 /* old crtc == new_crtc == NULL, plane not in context. */
10689 if (!new_plane_state->crtc)
10690 return false;
10691
10692 new_crtc_state =
10693 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10694 old_crtc_state =
10695 drm_atomic_get_old_crtc_state(state, old_plane_state->crtc);
10696
10697 if (!new_crtc_state)
10698 return true;
10699
10700 /*
10701 * A change in cursor mode means a new dc pipe needs to be acquired or
10702 * released from the state.
10703 */
10704 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
10705 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
10706 if (plane->type == DRM_PLANE_TYPE_CURSOR &&
10707 old_dm_crtc_state != NULL &&
10708 old_dm_crtc_state->cursor_mode != new_dm_crtc_state->cursor_mode) {
10709 return true;
10710 }
10711
10712 /* CRTC Degamma changes currently require us to recreate planes. */
10713 if (new_crtc_state->color_mgmt_changed)
10714 return true;
10715
10716 /*
10717 * On zpos change, planes need to be reordered by removing and re-adding
10718 * them one by one to the dc state, in order of descending zpos.
10719 *
10720 * TODO: We can likely skip bandwidth validation if the only thing that
10721 * changed about the plane was its z-ordering.
10722 */
10723 if (old_plane_state->normalized_zpos != new_plane_state->normalized_zpos)
10724 return true;
10725
10726 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10727 return true;
10728
10729 /*
10730 * If there are any new primary or overlay planes being added or
10731 * removed then the z-order can potentially change. To ensure
10732 * correct z-order and pipe acquisition the current DC architecture
10733 * requires us to remove and recreate all existing planes.
10734 *
10735 * TODO: Come up with a more elegant solution for this.
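 *
 * For example, swapping the zpos of two overlay planes changes neither
 * buffers nor geometry, yet both planes must be removed from and
 * re-added to the dc state so that pipes are re-acquired in the new
 * descending-zpos order.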
10736 */ 10737 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { 10738 struct amdgpu_framebuffer *old_afb, *new_afb; 10739 struct dm_plane_state *dm_new_other_state, *dm_old_other_state; 10740 10741 dm_new_other_state = to_dm_plane_state(new_other_state); 10742 dm_old_other_state = to_dm_plane_state(old_other_state); 10743 10744 if (other->type == DRM_PLANE_TYPE_CURSOR) 10745 continue; 10746 10747 if (old_other_state->crtc != new_plane_state->crtc && 10748 new_other_state->crtc != new_plane_state->crtc) 10749 continue; 10750 10751 if (old_other_state->crtc != new_other_state->crtc) 10752 return true; 10753 10754 /* Src/dst size and scaling updates. */ 10755 if (old_other_state->src_w != new_other_state->src_w || 10756 old_other_state->src_h != new_other_state->src_h || 10757 old_other_state->crtc_w != new_other_state->crtc_w || 10758 old_other_state->crtc_h != new_other_state->crtc_h) 10759 return true; 10760 10761 /* Rotation / mirroring updates. */ 10762 if (old_other_state->rotation != new_other_state->rotation) 10763 return true; 10764 10765 /* Blending updates. */ 10766 if (old_other_state->pixel_blend_mode != 10767 new_other_state->pixel_blend_mode) 10768 return true; 10769 10770 /* Alpha updates. */ 10771 if (old_other_state->alpha != new_other_state->alpha) 10772 return true; 10773 10774 /* Colorspace changes. */ 10775 if (old_other_state->color_range != new_other_state->color_range || 10776 old_other_state->color_encoding != new_other_state->color_encoding) 10777 return true; 10778 10779 /* HDR/Transfer Function changes. */ 10780 if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf || 10781 dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut || 10782 dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult || 10783 dm_old_other_state->ctm != dm_new_other_state->ctm || 10784 dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut || 10785 dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf || 10786 dm_old_other_state->lut3d != dm_new_other_state->lut3d || 10787 dm_old_other_state->blend_lut != dm_new_other_state->blend_lut || 10788 dm_old_other_state->blend_tf != dm_new_other_state->blend_tf) 10789 return true; 10790 10791 /* Framebuffer checks fall at the end. */ 10792 if (!old_other_state->fb || !new_other_state->fb) 10793 continue; 10794 10795 /* Pixel format changes can require bandwidth updates. */ 10796 if (old_other_state->fb->format != new_other_state->fb->format) 10797 return true; 10798 10799 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb; 10800 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb; 10801 10802 /* Tiling and DCC changes also require bandwidth updates. 
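 * For instance, toggling DCC on a scanout buffer or changing its
 * swizzle mode alters the memory traffic required to display it, so
 * treat this like a format change and reset the plane.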
*/ 10803 if (old_afb->tiling_flags != new_afb->tiling_flags || 10804 old_afb->base.modifier != new_afb->base.modifier) 10805 return true; 10806 } 10807 10808 return false; 10809 } 10810 10811 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, 10812 struct drm_plane_state *new_plane_state, 10813 struct drm_framebuffer *fb) 10814 { 10815 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev); 10816 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); 10817 unsigned int pitch; 10818 bool linear; 10819 10820 if (fb->width > new_acrtc->max_cursor_width || 10821 fb->height > new_acrtc->max_cursor_height) { 10822 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n", 10823 new_plane_state->fb->width, 10824 new_plane_state->fb->height); 10825 return -EINVAL; 10826 } 10827 if (new_plane_state->src_w != fb->width << 16 || 10828 new_plane_state->src_h != fb->height << 16) { 10829 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 10830 return -EINVAL; 10831 } 10832 10833 /* Pitch in pixels */ 10834 pitch = fb->pitches[0] / fb->format->cpp[0]; 10835 10836 if (fb->width != pitch) { 10837 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d", 10838 fb->width, pitch); 10839 return -EINVAL; 10840 } 10841 10842 switch (pitch) { 10843 case 64: 10844 case 128: 10845 case 256: 10846 /* FB pitch is supported by cursor plane */ 10847 break; 10848 default: 10849 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch); 10850 return -EINVAL; 10851 } 10852 10853 /* Core DRM takes care of checking FB modifiers, so we only need to 10854 * check tiling flags when the FB doesn't have a modifier. 10855 */ 10856 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { 10857 if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) { 10858 linear = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE) == 0; 10859 } else if (adev->family >= AMDGPU_FAMILY_AI) { 10860 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; 10861 } else { 10862 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && 10863 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && 10864 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; 10865 } 10866 if (!linear) { 10867 DRM_DEBUG_ATOMIC("Cursor FB not linear"); 10868 return -EINVAL; 10869 } 10870 } 10871 10872 return 0; 10873 } 10874 10875 /* 10876 * Helper function for checking the cursor in native mode 10877 */ 10878 static int dm_check_native_cursor_state(struct drm_crtc *new_plane_crtc, 10879 struct drm_plane *plane, 10880 struct drm_plane_state *new_plane_state, 10881 bool enable) 10882 { 10883 10884 struct amdgpu_crtc *new_acrtc; 10885 int ret; 10886 10887 if (!enable || !new_plane_crtc || 10888 drm_atomic_plane_disabling(plane->state, new_plane_state)) 10889 return 0; 10890 10891 new_acrtc = to_amdgpu_crtc(new_plane_crtc); 10892 10893 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { 10894 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 10895 return -EINVAL; 10896 } 10897 10898 if (new_plane_state->fb) { 10899 ret = dm_check_cursor_fb(new_acrtc, new_plane_state, 10900 new_plane_state->fb); 10901 if (ret) 10902 return ret; 10903 } 10904 10905 return 0; 10906 } 10907 10908 static bool dm_should_update_native_cursor(struct drm_atomic_state *state, 10909 struct drm_crtc *old_plane_crtc, 10910 struct drm_crtc *new_plane_crtc, 10911 bool enable) 10912 { 10913 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10914 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 
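/*
 * A plane being disabled is judged against the old CRTC state it is
 * leaving, while a plane being enabled is judged against the new CRTC
 * state it is joining; without a CRTC on the relevant side there is no
 * overlay pipe to consider, so default to the native path.
 */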
10915 10916 if (!enable) { 10917 if (old_plane_crtc == NULL) 10918 return true; 10919 10920 old_crtc_state = drm_atomic_get_old_crtc_state( 10921 state, old_plane_crtc); 10922 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10923 10924 return dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; 10925 } else { 10926 if (new_plane_crtc == NULL) 10927 return true; 10928 10929 new_crtc_state = drm_atomic_get_new_crtc_state( 10930 state, new_plane_crtc); 10931 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10932 10933 return dm_new_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; 10934 } 10935 } 10936 10937 static int dm_update_plane_state(struct dc *dc, 10938 struct drm_atomic_state *state, 10939 struct drm_plane *plane, 10940 struct drm_plane_state *old_plane_state, 10941 struct drm_plane_state *new_plane_state, 10942 bool enable, 10943 bool *lock_and_validation_needed, 10944 bool *is_top_most_overlay) 10945 { 10946 10947 struct dm_atomic_state *dm_state = NULL; 10948 struct drm_crtc *new_plane_crtc, *old_plane_crtc; 10949 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10950 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; 10951 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; 10952 bool needs_reset, update_native_cursor; 10953 int ret = 0; 10954 10955 10956 new_plane_crtc = new_plane_state->crtc; 10957 old_plane_crtc = old_plane_state->crtc; 10958 dm_new_plane_state = to_dm_plane_state(new_plane_state); 10959 dm_old_plane_state = to_dm_plane_state(old_plane_state); 10960 10961 update_native_cursor = dm_should_update_native_cursor(state, 10962 old_plane_crtc, 10963 new_plane_crtc, 10964 enable); 10965 10966 if (plane->type == DRM_PLANE_TYPE_CURSOR && update_native_cursor) { 10967 ret = dm_check_native_cursor_state(new_plane_crtc, plane, 10968 new_plane_state, enable); 10969 if (ret) 10970 return ret; 10971 10972 return 0; 10973 } 10974 10975 needs_reset = should_reset_plane(state, plane, old_plane_state, 10976 new_plane_state); 10977 10978 /* Remove any changed/removed planes */ 10979 if (!enable) { 10980 if (!needs_reset) 10981 return 0; 10982 10983 if (!old_plane_crtc) 10984 return 0; 10985 10986 old_crtc_state = drm_atomic_get_old_crtc_state( 10987 state, old_plane_crtc); 10988 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10989 10990 if (!dm_old_crtc_state->stream) 10991 return 0; 10992 10993 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", 10994 plane->base.id, old_plane_crtc->base.id); 10995 10996 ret = dm_atomic_get_state(state, &dm_state); 10997 if (ret) 10998 return ret; 10999 11000 if (!dc_state_remove_plane( 11001 dc, 11002 dm_old_crtc_state->stream, 11003 dm_old_plane_state->dc_state, 11004 dm_state->context)) { 11005 11006 return -EINVAL; 11007 } 11008 11009 if (dm_old_plane_state->dc_state) 11010 dc_plane_state_release(dm_old_plane_state->dc_state); 11011 11012 dm_new_plane_state->dc_state = NULL; 11013 11014 *lock_and_validation_needed = true; 11015 11016 } else { /* Add new planes */ 11017 struct dc_plane_state *dc_new_plane_state; 11018 11019 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 11020 return 0; 11021 11022 if (!new_plane_crtc) 11023 return 0; 11024 11025 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); 11026 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11027 11028 if (!dm_new_crtc_state->stream) 11029 return 0; 11030 11031 if (!needs_reset) 11032 return 0; 11033 11034 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); 11035 
if (ret) 11036 goto out; 11037 11038 WARN_ON(dm_new_plane_state->dc_state); 11039 11040 dc_new_plane_state = dc_create_plane_state(dc); 11041 if (!dc_new_plane_state) { 11042 ret = -ENOMEM; 11043 goto out; 11044 } 11045 11046 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", 11047 plane->base.id, new_plane_crtc->base.id); 11048 11049 ret = fill_dc_plane_attributes( 11050 drm_to_adev(new_plane_crtc->dev), 11051 dc_new_plane_state, 11052 new_plane_state, 11053 new_crtc_state); 11054 if (ret) { 11055 dc_plane_state_release(dc_new_plane_state); 11056 goto out; 11057 } 11058 11059 ret = dm_atomic_get_state(state, &dm_state); 11060 if (ret) { 11061 dc_plane_state_release(dc_new_plane_state); 11062 goto out; 11063 } 11064 11065 /* 11066 * Any atomic check errors that occur after this will 11067 * not need a release. The plane state will be attached 11068 * to the stream, and therefore part of the atomic 11069 * state. It'll be released when the atomic state is 11070 * cleaned. 11071 */ 11072 if (!dc_state_add_plane( 11073 dc, 11074 dm_new_crtc_state->stream, 11075 dc_new_plane_state, 11076 dm_state->context)) { 11077 11078 dc_plane_state_release(dc_new_plane_state); 11079 ret = -EINVAL; 11080 goto out; 11081 } 11082 11083 dm_new_plane_state->dc_state = dc_new_plane_state; 11084 11085 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY); 11086 11087 /* Tell DC to do a full surface update every time there 11088 * is a plane change. Inefficient, but works for now. 11089 */ 11090 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; 11091 11092 *lock_and_validation_needed = true; 11093 } 11094 11095 out: 11096 /* If enabling cursor overlay failed, attempt fallback to native mode */ 11097 if (enable && ret == -EINVAL && plane->type == DRM_PLANE_TYPE_CURSOR) { 11098 ret = dm_check_native_cursor_state(new_plane_crtc, plane, 11099 new_plane_state, enable); 11100 if (ret) 11101 return ret; 11102 11103 dm_new_crtc_state->cursor_mode = DM_CURSOR_NATIVE_MODE; 11104 } 11105 11106 return ret; 11107 } 11108 11109 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, 11110 int *src_w, int *src_h) 11111 { 11112 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 11113 case DRM_MODE_ROTATE_90: 11114 case DRM_MODE_ROTATE_270: 11115 *src_w = plane_state->src_h >> 16; 11116 *src_h = plane_state->src_w >> 16; 11117 break; 11118 case DRM_MODE_ROTATE_0: 11119 case DRM_MODE_ROTATE_180: 11120 default: 11121 *src_w = plane_state->src_w >> 16; 11122 *src_h = plane_state->src_h >> 16; 11123 break; 11124 } 11125 } 11126 11127 static void 11128 dm_get_plane_scale(struct drm_plane_state *plane_state, 11129 int *out_plane_scale_w, int *out_plane_scale_h) 11130 { 11131 int plane_src_w, plane_src_h; 11132 11133 dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h); 11134 *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0; 11135 *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0; 11136 } 11137 11138 /* 11139 * The normalized_zpos value cannot be used by this iterator directly. It's only 11140 * calculated for enabled planes, potentially causing normalized_zpos collisions 11141 * between enabled/disabled planes in the atomic state. We need a unique value 11142 * so that the iterator will not generate the same object twice, or loop 11143 * indefinitely. 
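 *
 * Keying on the (zpos, DRM object ID) pair instead gives a strict
 * ordering: plane a is returned before plane b iff
 *   a->zpos > b->zpos ||
 *   (a->zpos == b->zpos && a->base.id > b->base.id)
 * so each __get_next_zpos() call yields a strictly "smaller" plane and
 * the walk terminates.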
11144 */
11145 static inline struct __drm_planes_state *__get_next_zpos(
11146 struct drm_atomic_state *state,
11147 struct __drm_planes_state *prev)
11148 {
11149 unsigned int highest_zpos = 0, prev_zpos = 256;
11150 uint32_t highest_id = 0, prev_id = UINT_MAX;
11151 struct drm_plane_state *new_plane_state;
11152 struct drm_plane *plane;
11153 int i, highest_i = -1;
11154
11155 if (prev != NULL) {
11156 prev_zpos = prev->new_state->zpos;
11157 prev_id = prev->ptr->base.id;
11158 }
11159
11160 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
11161 /* Skip planes with higher zpos than the previously returned */
11162 if (new_plane_state->zpos > prev_zpos ||
11163 (new_plane_state->zpos == prev_zpos &&
11164 plane->base.id >= prev_id))
11165 continue;
11166
11167 /* Save the index of the plane with highest zpos */
11168 if (new_plane_state->zpos > highest_zpos ||
11169 (new_plane_state->zpos == highest_zpos &&
11170 plane->base.id > highest_id)) {
11171 highest_zpos = new_plane_state->zpos;
11172 highest_id = plane->base.id;
11173 highest_i = i;
11174 }
11175 }
11176
11177 if (highest_i < 0)
11178 return NULL;
11179
11180 return &state->planes[highest_i];
11181 }
11182
11183 /*
11184 * Use the uniqueness of the plane's (zpos, drm obj ID) combination to iterate
11185 * by descending zpos, as read from the new plane state. This is the same
11186 * ordering as defined by drm_atomic_normalize_zpos().
11187 */
11188 #define for_each_oldnew_plane_in_descending_zpos(__state, plane, old_plane_state, new_plane_state) \
11189 for (struct __drm_planes_state *__i = __get_next_zpos((__state), NULL); \
11190 __i != NULL; __i = __get_next_zpos((__state), __i)) \
11191 for_each_if(((plane) = __i->ptr, \
11192 (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
11193 (old_plane_state) = __i->old_state, \
11194 (new_plane_state) = __i->new_state, 1))
11195
11196 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11197 {
11198 struct drm_connector *connector;
11199 struct drm_connector_state *conn_state, *old_conn_state;
11200 struct amdgpu_dm_connector *aconnector = NULL;
11201 int i;
11202
11203 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11204 if (!conn_state->crtc)
11205 conn_state = old_conn_state;
11206
11207 if (conn_state->crtc != crtc)
11208 continue;
11209
11210 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
11211 continue;
11212
11213 aconnector = to_amdgpu_dm_connector(connector);
11214 if (!aconnector->mst_output_port || !aconnector->mst_root)
11215 aconnector = NULL;
11216 else
11217 break;
11218 }
11219
11220 if (!aconnector)
11221 return 0;
11222
11223 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
11224 }
11225
11226 /**
11227 * DOC: Cursor Modes - Native vs Overlay
11228 *
11229 * In native mode, the cursor uses an integrated cursor pipe within each DCN hw
11230 * plane. It does not require a dedicated hw plane to enable, but it is
11231 * subject to the same z-order and scaling as the hw plane. It also has format
11232 * restrictions: an RGB cursor in native mode cannot be enabled within a non-RGB
11233 * hw plane.
11234 *
11235 * In overlay mode, the cursor uses a separate DCN hw plane, and thus has its
11236 * own scaling and z-pos. It also has no blending restrictions. This lends
11237 * itself to cursor behavior more akin to a DRM client's expectations.
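 * (For example, above a video plane that is scaled up, an overlay-mode
 * cursor keeps its own size, whereas a native-mode cursor is scaled
 * together with the underlying plane.)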
However, it does
11238 * occupy an extra DCN plane, and therefore will only be used if a DCN plane is
11239 * available.
11240 */
11241
11242 /**
11243 * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc
11244 * @adev: amdgpu device
11245 * @state: DRM atomic state
11246 * @dm_crtc_state: amdgpu state for the CRTC containing the cursor
11247 * @cursor_mode: Returns the required cursor mode on dm_crtc_state
11248 *
11249 * Get whether the cursor should be enabled in native mode, or overlay mode, on
11250 * the dm_crtc_state.
11251 *
11252 * The cursor should be enabled in overlay mode if there exists an underlying
11253 * plane - on which the cursor may be blended - that is either YUV formatted, or
11254 * scaled differently from the cursor.
11255 *
11256 * Since zpos info is required, drm_atomic_normalize_zpos must be called before
11257 * calling this function.
11258 *
11259 * Return: 0 on success, or an error code if getting the cursor plane state
11260 * failed.
11261 */
11262 static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
11263 struct drm_atomic_state *state,
11264 struct dm_crtc_state *dm_crtc_state,
11265 enum amdgpu_dm_cursor_mode *cursor_mode)
11266 {
11267 struct drm_plane_state *old_plane_state, *plane_state, *cursor_state;
11268 struct drm_crtc_state *crtc_state = &dm_crtc_state->base;
11269 struct drm_plane *plane;
11270 bool consider_mode_change = false;
11271 bool entire_crtc_covered = false;
11272 bool cursor_changed = false;
11273 int underlying_scale_w, underlying_scale_h;
11274 int cursor_scale_w, cursor_scale_h;
11275 int i;
11276
11277 /* Overlay cursor is not supported on hardware before DCN.
11278 * DCN401 does not have the cursor-on-scaled-plane or cursor-on-yuv-plane restrictions
11279 * of previous DCN generations, so enable native mode on DCN401 in addition to DCE.
11280 */
11281 if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0 ||
11282 amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
11283 *cursor_mode = DM_CURSOR_NATIVE_MODE;
11284 return 0;
11285 }
11286
11287 /* Init cursor_mode to be the same as current */
11288 *cursor_mode = dm_crtc_state->cursor_mode;
11289
11290 /*
11291 * Cursor mode can change if a plane's format changes, scale changes, is
11292 * enabled/disabled, or z-order changes.
11293 */
11294 for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) {
11295 int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
11296
11297 /* Only care about planes on this CRTC */
11298 if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0)
11299 continue;
11300
11301 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11302 cursor_changed = true;
11303
11304 if (drm_atomic_plane_enabling(old_plane_state, plane_state) ||
11305 drm_atomic_plane_disabling(old_plane_state, plane_state) ||
11306 old_plane_state->fb->format != plane_state->fb->format) {
11307 consider_mode_change = true;
11308 break;
11309 }
11310
11311 dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h);
11312 dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
11313 if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
11314 consider_mode_change = true;
11315 break;
11316 }
11317 }
11318
11319 if (!consider_mode_change && !crtc_state->zpos_changed)
11320 return 0;
11321
11322 /*
11323 * If no cursor change on this CRTC, and not enabled on this CRTC, then
11324 * no need to set cursor mode. This avoids needlessly locking the cursor
11325 * state.
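 * (drm_atomic_get_plane_state() on the cursor plane would take that
 * plane's modeset lock and could serialize otherwise-independent
 * commits.)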
11326 */ 11327 if (!cursor_changed && 11328 !(drm_plane_mask(crtc_state->crtc->cursor) & crtc_state->plane_mask)) { 11329 return 0; 11330 } 11331 11332 cursor_state = drm_atomic_get_plane_state(state, 11333 crtc_state->crtc->cursor); 11334 if (IS_ERR(cursor_state)) 11335 return PTR_ERR(cursor_state); 11336 11337 /* Cursor is disabled */ 11338 if (!cursor_state->fb) 11339 return 0; 11340 11341 /* For all planes in descending z-order (all of which are below cursor 11342 * as per zpos definitions), check their scaling and format 11343 */ 11344 for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, plane_state) { 11345 11346 /* Only care about non-cursor planes on this CRTC */ 11347 if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0 || 11348 plane->type == DRM_PLANE_TYPE_CURSOR) 11349 continue; 11350 11351 /* Underlying plane is YUV format - use overlay cursor */ 11352 if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) { 11353 *cursor_mode = DM_CURSOR_OVERLAY_MODE; 11354 return 0; 11355 } 11356 11357 dm_get_plane_scale(plane_state, 11358 &underlying_scale_w, &underlying_scale_h); 11359 dm_get_plane_scale(cursor_state, 11360 &cursor_scale_w, &cursor_scale_h); 11361 11362 /* Underlying plane has different scale - use overlay cursor */ 11363 if (cursor_scale_w != underlying_scale_w && 11364 cursor_scale_h != underlying_scale_h) { 11365 *cursor_mode = DM_CURSOR_OVERLAY_MODE; 11366 return 0; 11367 } 11368 11369 /* If this plane covers the whole CRTC, no need to check planes underneath */ 11370 if (plane_state->crtc_x <= 0 && plane_state->crtc_y <= 0 && 11371 plane_state->crtc_x + plane_state->crtc_w >= crtc_state->mode.hdisplay && 11372 plane_state->crtc_y + plane_state->crtc_h >= crtc_state->mode.vdisplay) { 11373 entire_crtc_covered = true; 11374 break; 11375 } 11376 } 11377 11378 /* If planes do not cover the entire CRTC, use overlay mode to enable 11379 * cursor over holes 11380 */ 11381 if (entire_crtc_covered) 11382 *cursor_mode = DM_CURSOR_NATIVE_MODE; 11383 else 11384 *cursor_mode = DM_CURSOR_OVERLAY_MODE; 11385 11386 return 0; 11387 } 11388 11389 /** 11390 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. 11391 * 11392 * @dev: The DRM device 11393 * @state: The atomic state to commit 11394 * 11395 * Validate that the given atomic state is programmable by DC into hardware. 11396 * This involves constructing a &struct dc_state reflecting the new hardware 11397 * state we wish to commit, then querying DC to see if it is programmable. It's 11398 * important not to modify the existing DC state. Otherwise, atomic_check 11399 * may unexpectedly commit hardware changes. 11400 * 11401 * When validating the DC state, it's important that the right locks are 11402 * acquired. For full updates case which removes/adds/updates streams on one 11403 * CRTC while flipping on another CRTC, acquiring global lock will guarantee 11404 * that any such full update commit will wait for completion of any outstanding 11405 * flip using DRMs synchronization events. 11406 * 11407 * Note that DM adds the affected connectors for all CRTCs in state, when that 11408 * might not seem necessary. This is because DC stream creation requires the 11409 * DC sink, which is tied to the DRM connector state. Cleaning this up should 11410 * be possible but non-trivial - a possible TODO item. 11411 * 11412 * Return: -Error code if validation failed. 
11413 */ 11414 static int amdgpu_dm_atomic_check(struct drm_device *dev, 11415 struct drm_atomic_state *state) 11416 { 11417 struct amdgpu_device *adev = drm_to_adev(dev); 11418 struct dm_atomic_state *dm_state = NULL; 11419 struct dc *dc = adev->dm.dc; 11420 struct drm_connector *connector; 11421 struct drm_connector_state *old_con_state, *new_con_state; 11422 struct drm_crtc *crtc; 11423 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 11424 struct drm_plane *plane; 11425 struct drm_plane_state *old_plane_state, *new_plane_state, *new_cursor_state; 11426 enum dc_status status; 11427 int ret, i; 11428 bool lock_and_validation_needed = false; 11429 bool is_top_most_overlay = true; 11430 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 11431 struct drm_dp_mst_topology_mgr *mgr; 11432 struct drm_dp_mst_topology_state *mst_state; 11433 struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0}; 11434 11435 trace_amdgpu_dm_atomic_check_begin(state); 11436 11437 ret = drm_atomic_helper_check_modeset(dev, state); 11438 if (ret) { 11439 drm_dbg_atomic(dev, "drm_atomic_helper_check_modeset() failed\n"); 11440 goto fail; 11441 } 11442 11443 /* Check connector changes */ 11444 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 11445 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 11446 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 11447 11448 /* Skip connectors that are disabled or part of modeset already. */ 11449 if (!new_con_state->crtc) 11450 continue; 11451 11452 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); 11453 if (IS_ERR(new_crtc_state)) { 11454 drm_dbg_atomic(dev, "drm_atomic_get_crtc_state() failed\n"); 11455 ret = PTR_ERR(new_crtc_state); 11456 goto fail; 11457 } 11458 11459 if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || 11460 dm_old_con_state->scaling != dm_new_con_state->scaling) 11461 new_crtc_state->connectors_changed = true; 11462 } 11463 11464 if (dc_resource_is_dsc_encoding_supported(dc)) { 11465 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11466 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { 11467 ret = add_affected_mst_dsc_crtcs(state, crtc); 11468 if (ret) { 11469 drm_dbg_atomic(dev, "add_affected_mst_dsc_crtcs() failed\n"); 11470 goto fail; 11471 } 11472 } 11473 } 11474 } 11475 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11476 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 11477 11478 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 11479 !new_crtc_state->color_mgmt_changed && 11480 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled && 11481 dm_old_crtc_state->dsc_force_changed == false) 11482 continue; 11483 11484 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); 11485 if (ret) { 11486 drm_dbg_atomic(dev, "amdgpu_dm_verify_lut_sizes() failed\n"); 11487 goto fail; 11488 } 11489 11490 if (!new_crtc_state->enable) 11491 continue; 11492 11493 ret = drm_atomic_add_affected_connectors(state, crtc); 11494 if (ret) { 11495 drm_dbg_atomic(dev, "drm_atomic_add_affected_connectors() failed\n"); 11496 goto fail; 11497 } 11498 11499 ret = drm_atomic_add_affected_planes(state, crtc); 11500 if (ret) { 11501 drm_dbg_atomic(dev, "drm_atomic_add_affected_planes() failed\n"); 11502 goto fail; 11503 } 11504 11505 if (dm_old_crtc_state->dsc_force_changed) 11506 new_crtc_state->mode_changed = true; 11507 } 11508 11509 /* 11510 * Add all 
primary and overlay planes on the CRTC to the state 11511 * whenever a plane is enabled to maintain correct z-ordering 11512 * and to enable fast surface updates. 11513 */ 11514 drm_for_each_crtc(crtc, dev) { 11515 bool modified = false; 11516 11517 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 11518 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11519 continue; 11520 11521 if (new_plane_state->crtc == crtc || 11522 old_plane_state->crtc == crtc) { 11523 modified = true; 11524 break; 11525 } 11526 } 11527 11528 if (!modified) 11529 continue; 11530 11531 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 11532 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11533 continue; 11534 11535 new_plane_state = 11536 drm_atomic_get_plane_state(state, plane); 11537 11538 if (IS_ERR(new_plane_state)) { 11539 ret = PTR_ERR(new_plane_state); 11540 drm_dbg_atomic(dev, "new_plane_state is BAD\n"); 11541 goto fail; 11542 } 11543 } 11544 } 11545 11546 /* 11547 * DC consults the zpos (layer_index in DC terminology) to determine the 11548 * hw plane on which to enable the hw cursor (see 11549 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in 11550 * atomic state, so call drm helper to normalize zpos. 11551 */ 11552 ret = drm_atomic_normalize_zpos(dev, state); 11553 if (ret) { 11554 drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n"); 11555 goto fail; 11556 } 11557 11558 /* 11559 * Determine whether cursors on each CRTC should be enabled in native or 11560 * overlay mode. 11561 */ 11562 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11563 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11564 11565 ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state, 11566 &dm_new_crtc_state->cursor_mode); 11567 if (ret) { 11568 drm_dbg(dev, "Failed to determine cursor mode\n"); 11569 goto fail; 11570 } 11571 11572 /* 11573 * If overlay cursor is needed, DC cannot go through the 11574 * native cursor update path. 
All enabled planes on the CRTC
11575 * need to be added so that DC does not disable a plane by mistake.
11576 */
11577 if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) {
11578 ret = drm_atomic_add_affected_planes(state, crtc);
11579 if (ret)
11580 goto fail;
11581 }
11582 }
11583
11584 /* Remove existing planes if they are modified */
11585 for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
11586 if (old_plane_state->fb && new_plane_state->fb &&
11587 get_mem_type(old_plane_state->fb) !=
11588 get_mem_type(new_plane_state->fb))
11589 lock_and_validation_needed = true;
11590
11591 ret = dm_update_plane_state(dc, state, plane,
11592 old_plane_state,
11593 new_plane_state,
11594 false,
11595 &lock_and_validation_needed,
11596 &is_top_most_overlay);
11597 if (ret) {
11598 drm_dbg_atomic(dev, "dm_update_plane_state() failed\n");
11599 goto fail;
11600 }
11601 }
11602
11603 /* Disable all CRTCs which require disable */
11604 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11605 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11606 old_crtc_state,
11607 new_crtc_state,
11608 false,
11609 &lock_and_validation_needed);
11610 if (ret) {
11611 drm_dbg_atomic(dev, "DISABLE: dm_update_crtc_state() failed\n");
11612 goto fail;
11613 }
11614 }
11615
11616 /* Enable all CRTCs which require enable */
11617 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11618 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11619 old_crtc_state,
11620 new_crtc_state,
11621 true,
11622 &lock_and_validation_needed);
11623 if (ret) {
11624 drm_dbg_atomic(dev, "ENABLE: dm_update_crtc_state() failed\n");
11625 goto fail;
11626 }
11627 }
11628
11629 /* Add new/modified planes */
11630 for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
11631 ret = dm_update_plane_state(dc, state, plane,
11632 old_plane_state,
11633 new_plane_state,
11634 true,
11635 &lock_and_validation_needed,
11636 &is_top_most_overlay);
11637 if (ret) {
11638 drm_dbg_atomic(dev, "dm_update_plane_state() failed\n");
11639 goto fail;
11640 }
11641 }
11642
11643 #if defined(CONFIG_DRM_AMD_DC_FP)
11644 if (dc_resource_is_dsc_encoding_supported(dc)) {
11645 ret = pre_validate_dsc(state, &dm_state, vars);
11646 if (ret != 0)
11647 goto fail;
11648 }
11649 #endif
11650
11651 /* Run this here since we want to validate the streams we created */
11652 ret = drm_atomic_helper_check_planes(dev, state);
11653 if (ret) {
11654 drm_dbg_atomic(dev, "drm_atomic_helper_check_planes() failed\n");
11655 goto fail;
11656 }
11657
11658 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11659 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11660 if (dm_new_crtc_state->mpo_requested)
11661 drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc);
11662 }
11663
11664 /* Check cursor restrictions */
11665 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11666 enum amdgpu_dm_cursor_mode required_cursor_mode;
11667 int is_rotated, is_scaled;
11668
11669 /* Overlay cursor is not subject to native cursor restrictions */
11670 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11671 if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
11672 continue;
11673
11674 /* Check if rotation or scaling is enabled on DCN401 */
11675 if ((drm_plane_mask(crtc->cursor) & new_crtc_state->plane_mask) &&
11676 amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
11677 new_cursor_state =
drm_atomic_get_new_plane_state(state, crtc->cursor);
11678
11679 is_rotated = new_cursor_state &&
11680 ((new_cursor_state->rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0);
11681 is_scaled = new_cursor_state && ((new_cursor_state->src_w >> 16 != new_cursor_state->crtc_w) ||
11682 (new_cursor_state->src_h >> 16 != new_cursor_state->crtc_h));
11683
11684 if (is_rotated || is_scaled) {
11685 drm_dbg_driver(
11686 crtc->dev,
11687 "[CRTC:%d:%s] cannot enable hardware cursor due to rotation/scaling\n",
11688 crtc->base.id, crtc->name);
11689 ret = -EINVAL;
11690 goto fail;
11691 }
11692 }
11693
11694 /* If HW can only do native cursor, check restrictions again */
11695 ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
11696 &required_cursor_mode);
11697 if (ret) {
11698 drm_dbg_driver(crtc->dev,
11699 "[CRTC:%d:%s] Checking cursor mode failed\n",
11700 crtc->base.id, crtc->name);
11701 goto fail;
11702 } else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) {
11703 drm_dbg_driver(crtc->dev,
11704 "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n",
11705 crtc->base.id, crtc->name);
11706 ret = -EINVAL;
11707 goto fail;
11708 }
11709 }
11710
11711 if (state->legacy_cursor_update) {
11712 /*
11713 * This is a fast cursor update coming from the plane update
11714 * helper; check if it can be done asynchronously for better
11715 * performance.
11716 */
11717 state->async_update =
11718 !drm_atomic_helper_async_check(dev, state);
11719
11720 /*
11721 * Skip the remaining global validation if this is an async
11722 * update. Cursor updates can be done without affecting
11723 * state or bandwidth calcs and this avoids the performance
11724 * penalty of locking the private state object and
11725 * allocating a new dc_state.
11726 */
11727 if (state->async_update)
11728 return 0;
11729 }
11730
11731 /* Check scaling and underscan changes */
11732 /* TODO Removed scaling changes validation due to inability to commit
11733 * new stream into context w/o causing full reset. Need to
11734 * decide how to handle. Until then, a pure scaling/underscan change
 * simply forces the full lock-and-validation path below.
11735 */
11736 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11737 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11738 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11739 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11740
11741 /* Skip any modesets/resets */
11742 if (!acrtc || drm_atomic_crtc_needs_modeset(
11743 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11744 continue;
11745
11746 /* Skip anything that is not a scaling or underscan change */
11747 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11748 continue;
11749
11750 lock_and_validation_needed = true;
11751 }
11752
11753 /* set the slot info for each mst_state based on the link encoding format */
11754 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11755 struct amdgpu_dm_connector *aconnector;
11756 struct drm_connector *connector;
11757 struct drm_connector_list_iter iter;
11758 u8 link_coding_cap;
11759
11760 drm_connector_list_iter_begin(dev, &iter);
11761 drm_for_each_connector_iter(connector, &iter) {
11762 if (connector->index == mst_state->mgr->conn_base_id) {
11763 aconnector = to_amdgpu_dm_connector(connector);
11764 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11765 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11766
11767 break;
11768 }
11769 }
11770 drm_connector_list_iter_end(&iter);
11771 }
11772
11773 /*
11774 * Streams and planes are reset when there are changes that affect
11775 * bandwidth. Anything that affects bandwidth needs to go through
11776 * DC global validation to ensure that the configuration can be applied
11777 * to hardware.
11778 *
11779 * We currently have to stall out here in atomic_check for outstanding
11780 * commits to finish in this case because our IRQ handlers reference
11781 * DRM state directly - we can end up disabling interrupts too early
11782 * if we don't.
11783 *
11784 * TODO: Remove this stall and drop DM state private objects.
11785 */
11786 if (lock_and_validation_needed) {
11787 ret = dm_atomic_get_state(state, &dm_state);
11788 if (ret) {
11789 drm_dbg_atomic(dev, "dm_atomic_get_state() failed\n");
11790 goto fail;
11791 }
11792
11793 ret = do_aquire_global_lock(dev, state);
11794 if (ret) {
11795 drm_dbg_atomic(dev, "do_aquire_global_lock() failed\n");
11796 goto fail;
11797 }
11798
11799 #if defined(CONFIG_DRM_AMD_DC_FP)
11800 if (dc_resource_is_dsc_encoding_supported(dc)) {
11801 ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
11802 if (ret) {
11803 drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n");
11804 ret = -EINVAL;
11805 goto fail;
11806 }
11807 }
11808 #endif
11809
11810 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11811 if (ret) {
11812 drm_dbg_atomic(dev, "dm_update_mst_vcpi_slots_for_dsc() failed\n");
11813 goto fail;
11814 }
11815
11816 /*
11817 * Perform validation of MST topology in the state:
11818 * We need to perform MST atomic check before calling
11819 * dc_validate_global_state(), or there is a chance
11820 * to get stuck in an infinite loop and hang eventually.
11821 */ 11822 ret = drm_dp_mst_atomic_check(state); 11823 if (ret) { 11824 drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n"); 11825 goto fail; 11826 } 11827 status = dc_validate_global_state(dc, dm_state->context, true); 11828 if (status != DC_OK) { 11829 drm_dbg_atomic(dev, "DC global validation failure: %s (%d)", 11830 dc_status_to_str(status), status); 11831 ret = -EINVAL; 11832 goto fail; 11833 } 11834 } else { 11835 /* 11836 * The commit is a fast update. Fast updates shouldn't change 11837 * the DC context, affect global validation, and can have their 11838 * commit work done in parallel with other commits not touching 11839 * the same resource. If we have a new DC context as part of 11840 * the DM atomic state from validation we need to free it and 11841 * retain the existing one instead. 11842 * 11843 * Furthermore, since the DM atomic state only contains the DC 11844 * context and can safely be annulled, we can free the state 11845 * and clear the associated private object now to free 11846 * some memory and avoid a possible use-after-free later. 11847 */ 11848 11849 for (i = 0; i < state->num_private_objs; i++) { 11850 struct drm_private_obj *obj = state->private_objs[i].ptr; 11851 11852 if (obj->funcs == adev->dm.atomic_obj.funcs) { 11853 int j = state->num_private_objs-1; 11854 11855 dm_atomic_destroy_state(obj, 11856 state->private_objs[i].state); 11857 11858 /* If i is not at the end of the array then the 11859 * last element needs to be moved to where i was 11860 * before the array can safely be truncated. 11861 */ 11862 if (i != j) 11863 state->private_objs[i] = 11864 state->private_objs[j]; 11865 11866 state->private_objs[j].ptr = NULL; 11867 state->private_objs[j].state = NULL; 11868 state->private_objs[j].old_state = NULL; 11869 state->private_objs[j].new_state = NULL; 11870 11871 state->num_private_objs = j; 11872 break; 11873 } 11874 } 11875 } 11876 11877 /* Store the overall update type for use later in atomic check. */ 11878 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11879 struct dm_crtc_state *dm_new_crtc_state = 11880 to_dm_crtc_state(new_crtc_state); 11881 11882 /* 11883 * Only allow async flips for fast updates that don't change 11884 * the FB pitch, the DCC state, rotation, etc. 11885 */ 11886 if (new_crtc_state->async_flip && lock_and_validation_needed) { 11887 drm_dbg_atomic(crtc->dev, 11888 "[CRTC:%d:%s] async flips are only supported for fast updates\n", 11889 crtc->base.id, crtc->name); 11890 ret = -EINVAL; 11891 goto fail; 11892 } 11893 11894 dm_new_crtc_state->update_type = lock_and_validation_needed ? 
11895 UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
11896 }
11897
11898 /* Must be success */
11899 WARN_ON(ret);
11900
11901 trace_amdgpu_dm_atomic_check_finish(state, ret);
11902
11903 return ret;
11904
11905 fail:
11906 if (ret == -EDEADLK)
11907 drm_dbg_atomic(dev, "Atomic check stopped to avoid deadlock.\n");
11908 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11909 drm_dbg_atomic(dev, "Atomic check stopped due to signal.\n");
11910 else
11911 drm_dbg_atomic(dev, "Atomic check failed with err: %d\n", ret);
11912
11913 trace_amdgpu_dm_atomic_check_finish(state, ret);
11914
11915 return ret;
11916 }
11917
11918 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11919 unsigned int offset,
11920 unsigned int total_length,
11921 u8 *data,
11922 unsigned int length,
11923 struct amdgpu_hdmi_vsdb_info *vsdb)
11924 {
11925 bool res;
11926 union dmub_rb_cmd cmd;
11927 struct dmub_cmd_send_edid_cea *input;
11928 struct dmub_cmd_edid_cea_output *output;
11929
11930 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11931 return false;
11932
11933 memset(&cmd, 0, sizeof(cmd));
11934
11935 input = &cmd.edid_cea.data.input;
11936
11937 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11938 cmd.edid_cea.header.sub_type = 0;
11939 cmd.edid_cea.header.payload_bytes =
11940 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11941 input->offset = offset;
11942 input->length = length;
11943 input->cea_total_length = total_length;
11944 memcpy(input->payload, data, length);
11945
11946 res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
11947 if (!res) {
11948 DRM_ERROR("EDID CEA parser failed\n");
11949 return false;
11950 }
11951
11952 output = &cmd.edid_cea.data.output;
11953
11954 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11955 if (!output->ack.success) {
11956 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11957 output->ack.offset);
11958 }
11959 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11960 if (!output->amd_vsdb.vsdb_found)
11961 return false;
11962
11963 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11964 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11965 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11966 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11967 } else {
11968 DRM_WARN("Unknown EDID CEA parser results\n");
11969 return false;
11970 }
11971
11972 return true;
11973 }
11974
11975 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11976 u8 *edid_ext, int len,
11977 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11978 {
11979 int i;
11980
11981 /* send extension block to DMCU for parsing */
11982 for (i = 0; i < len; i += 8) {
11983 bool res;
11984 int offset;
11985
11986 /* send 8 bytes at a time */
11987 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11988 return false;
11989
11990 if (i+8 == len) {
11991 /* EDID block fully sent; expect the result */
11992 int version, min_rate, max_rate;
11993
11994 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11995 if (res) {
11996 /* amd vsdb found */
11997 vsdb_info->freesync_supported = 1;
11998 vsdb_info->amd_vsdb_version = version;
11999 vsdb_info->min_refresh_rate_hz = min_rate;
12000 vsdb_info->max_refresh_rate_hz = max_rate;
12001 return true;
12002 }
12003 /* not amd vsdb */
12004 return false;
12005 }
12006
12007 /* check for ack */
12008 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
12009 if (!res)
12010 return false;
12011 }
12012
12013
return false;
12014 }
12015
12016 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
12017 u8 *edid_ext, int len,
12018 struct amdgpu_hdmi_vsdb_info *vsdb_info)
12019 {
12020 int i;
12021
12022 /* send extension block to DMUB for parsing */
12023 for (i = 0; i < len; i += 8) {
12024 /* send 8 bytes at a time */
12025 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
12026 return false;
12027 }
12028
12029 return vsdb_info->freesync_supported;
12030 }
12031
12032 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
12033 u8 *edid_ext, int len,
12034 struct amdgpu_hdmi_vsdb_info *vsdb_info)
12035 {
12036 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
12037 bool ret;
12038
12039 mutex_lock(&adev->dm.dc_lock);
12040 if (adev->dm.dmub_srv)
12041 ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
12042 else
12043 ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
12044 mutex_unlock(&adev->dm.dc_lock);
12045 return ret;
12046 }
12047
12048 static void parse_edid_displayid_vrr(struct drm_connector *connector,
12049 const struct edid *edid)
12050 {
12051 u8 *edid_ext = NULL;
12052 int i;
12053 int j = 0;
12054 u16 min_vfreq;
12055 u16 max_vfreq;
12056
12057 if (edid == NULL || edid->extensions == 0)
12058 return;
12059
12060 /* Find DisplayID extension */
12061 for (i = 0; i < edid->extensions; i++) {
12062 edid_ext = (void *)(edid + (i + 1));
12063 if (edid_ext[0] == DISPLAYID_EXT)
12064 break;
12065 }
12066
12067 if (edid_ext == NULL)
12068 return;
12069
12070 while (j < EDID_LENGTH) {
12071 /* Get dynamic video timing range from DisplayID if available */
12072 if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 &&
12073 (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) {
12074 min_vfreq = edid_ext[j+9];
12075 if (edid_ext[j+1] & 7)
12076 max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8);
12077 else
12078 max_vfreq = edid_ext[j+10];
12079
12080 if (max_vfreq && min_vfreq) {
12081 connector->display_info.monitor_range.max_vfreq = max_vfreq;
12082 connector->display_info.monitor_range.min_vfreq = min_vfreq;
12083
12084 return;
12085 }
12086 }
12087 j++;
12088 }
12089 }
12090
12091 static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
12092 const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
12093 {
12094 u8 *edid_ext = NULL;
12095 int i;
12096 int j = 0;
12097
12098 if (edid == NULL || edid->extensions == 0)
12099 return -ENODEV;
12100
12101 /* Find DisplayID extension */
12102 for (i = 0; i < edid->extensions; i++) {
12103 edid_ext = (void *)(edid + (i + 1));
12104 if (edid_ext[0] == DISPLAYID_EXT)
12105 break;
12106 }
12107
12108 while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
12109 struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
12110 unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
12111
12112 if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
12113 amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
12114 vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ?
true : false; 12115 vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3; 12116 DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode); 12117 12118 return true; 12119 } 12120 j++; 12121 } 12122 12123 return false; 12124 } 12125 12126 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, 12127 const struct edid *edid, 12128 struct amdgpu_hdmi_vsdb_info *vsdb_info) 12129 { 12130 u8 *edid_ext = NULL; 12131 int i; 12132 bool valid_vsdb_found = false; 12133 12134 /*----- drm_find_cea_extension() -----*/ 12135 /* No EDID or EDID extensions */ 12136 if (edid == NULL || edid->extensions == 0) 12137 return -ENODEV; 12138 12139 /* Find CEA extension */ 12140 for (i = 0; i < edid->extensions; i++) { 12141 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1); 12142 if (edid_ext[0] == CEA_EXT) 12143 break; 12144 } 12145 12146 if (i == edid->extensions) 12147 return -ENODEV; 12148 12149 /*----- cea_db_offsets() -----*/ 12150 if (edid_ext[0] != CEA_EXT) 12151 return -ENODEV; 12152 12153 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info); 12154 12155 return valid_vsdb_found ? i : -ENODEV; 12156 } 12157 12158 /** 12159 * amdgpu_dm_update_freesync_caps - Update Freesync capabilities 12160 * 12161 * @connector: Connector to query. 12162 * @drm_edid: DRM EDID from monitor 12163 * 12164 * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep 12165 * track of some of the display information in the internal data struct used by 12166 * amdgpu_dm. This function checks which type of connector we need to set the 12167 * FreeSync parameters. 12168 */ 12169 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, 12170 const struct drm_edid *drm_edid) 12171 { 12172 int i = 0; 12173 struct amdgpu_dm_connector *amdgpu_dm_connector = 12174 to_amdgpu_dm_connector(connector); 12175 struct dm_connector_state *dm_con_state = NULL; 12176 struct dc_sink *sink; 12177 struct amdgpu_device *adev = drm_to_adev(connector->dev); 12178 struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; 12179 const struct edid *edid; 12180 bool freesync_capable = false; 12181 enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; 12182 12183 if (!connector->state) { 12184 DRM_ERROR("%s - Connector has no state", __func__); 12185 goto update; 12186 } 12187 12188 sink = amdgpu_dm_connector->dc_sink ? 
12189 amdgpu_dm_connector->dc_sink : 12190 amdgpu_dm_connector->dc_em_sink; 12191 12192 drm_edid_connector_update(connector, drm_edid); 12193 12194 if (!drm_edid || !sink) { 12195 dm_con_state = to_dm_connector_state(connector->state); 12196 12197 amdgpu_dm_connector->min_vfreq = 0; 12198 amdgpu_dm_connector->max_vfreq = 0; 12199 freesync_capable = false; 12200 12201 goto update; 12202 } 12203 12204 dm_con_state = to_dm_connector_state(connector->state); 12205 12206 if (!adev->dm.freesync_module) 12207 goto update; 12208 12209 edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() 12210 12211 /* Some eDP panels only have the refresh rate range info in DisplayID */ 12212 if ((connector->display_info.monitor_range.min_vfreq == 0 || 12213 connector->display_info.monitor_range.max_vfreq == 0)) 12214 parse_edid_displayid_vrr(connector, edid); 12215 12216 if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || 12217 sink->sink_signal == SIGNAL_TYPE_EDP)) { 12218 amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; 12219 amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; 12220 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 12221 freesync_capable = true; 12222 parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 12223 12224 if (vsdb_info.replay_mode) { 12225 amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode; 12226 amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version; 12227 amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; 12228 } 12229 12230 } else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { 12231 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 12232 if (i >= 0 && vsdb_info.freesync_supported) { 12233 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; 12234 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; 12235 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 12236 freesync_capable = true; 12237 12238 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; 12239 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; 12240 } 12241 } 12242 12243 if (amdgpu_dm_connector->dc_link) 12244 as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); 12245 12246 if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { 12247 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 12248 if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) { 12249 12250 amdgpu_dm_connector->pack_sdp_v1_3 = true; 12251 amdgpu_dm_connector->as_type = as_type; 12252 amdgpu_dm_connector->vsdb_info = vsdb_info; 12253 12254 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; 12255 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; 12256 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 12257 freesync_capable = true; 12258 12259 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; 12260 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; 12261 } 12262 } 12263 12264 update: 12265 if (dm_con_state) 12266 dm_con_state->freesync_capable = freesync_capable; 12267 12268 if (connector->state && amdgpu_dm_connector->dc_link && !freesync_capable && 12269 amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported) { 12270 amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported = false; 
		amdgpu_dm_connector->dc_link->replay_settings.replay_feature_enabled = false;
	}

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

/*
 * Propagate the force_timing_sync module setting to every stream in the
 * current DC state and retrigger cross-CRTC synchronization.
 */
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

/*
 * Exit IPS (idle power state) before touching hardware, but only when no
 * idle exit is already outstanding (idle_exit_counter == 0).
 */
static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
{
	if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
		dc_exit_ips_for_hw_access(dc);
}

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       u32 value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		drm_err(adev_to_drm(ctx->driver_context),
			"invalid register write. address = 0\n");
		return;
	}
#endif

	amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	u32 value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		drm_err(adev_to_drm(ctx->driver_context),
			"invalid register read. address = 0\n");
		return 0;
	}
#endif

	/* Reads cannot be serviced while DMUB is gathering offloaded writes. */
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	amdgpu_dm_exit_ips_for_hw_access(ctx->dc);

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

/*
 * Submit a DPIA AUX transfer through DMUB and wait up to 10 seconds for
 * the reply. Returns the number of reply bytes on success and -1 on
 * failure; the detailed status is reported through *operation_result.
 */
int amdgpu_dm_process_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;
	int ret = -1;

	mutex_lock(&adev->dm.dpia_aux_lock);
	if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
		*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		goto out;
	}

	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		DRM_ERROR("timed out waiting for DMUB AUX transfer completion\n");
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		goto out;
	}

	if (p_notify->result != AUX_RET_SUCCESS) {
		/*
		 * Transient states before tunneling is enabled could
		 * lead to this error. We can ignore this for now.
		 */
		if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
			DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
				 payload->address, payload->length,
				 p_notify->result);
		}
		*operation_result = AUX_RET_ERROR_INVALID_REPLY;
		goto out;
	}

	payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
	if (!payload->write && p_notify->aux_reply.length &&
	    (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {

		if (payload->length != p_notify->aux_reply.length) {
			DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
				 p_notify->aux_reply.length,
				 payload->address, payload->length);
			*operation_result = AUX_RET_ERROR_INVALID_REPLY;
			goto out;
		}

		memcpy(payload->data, p_notify->aux_reply.data,
		       p_notify->aux_reply.length);
	}

	/* success */
	ret = p_notify->aux_reply.length;
	*operation_result = p_notify->result;
out:
	reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}

/*
 * Issue a DPIA SET_CONFIG request through DMUB and wait for completion.
 * Returns 0 on success and -1 on timeout; the SET_CONFIG status is
 * reported through *operation_result.
 */
int amdgpu_dm_process_dmub_set_config_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	bool is_cmd_complete;
	int ret;

	mutex_lock(&adev->dm.dpia_aux_lock);
	is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
			link_index, payload, adev->dm.dmub_notify);

	if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		ret = 0;
		*operation_result = adev->dm.dmub_notify->sc_status;
	} else {
		DRM_ERROR("timed out waiting for DMUB SET_CONFIG completion\n");
		ret = -1;
		*operation_result = SET_CONFIG_UNKNOWN_ERROR;
	}

	if (!is_cmd_complete)
		reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}

bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
}

bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
}
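
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * typically zero-initializes a union dmub_rb_cmd, fills in the header and
 * payload fields for the specific DMUB command it wants to issue, and
 * submits it through the dm_execute_dmub_cmd() wrapper above. The command
 * fields below are placeholders; the exact header type and payload layout
 * depend on the command being sent.
 *
 *	union dmub_rb_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	// fill in cmd.<command>.header.type and the command payload here
 *	if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT))
 *		DRM_ERROR("DMUB command submission failed\n");
 */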