1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 * 24 */ 25 26 /* The caprices of the preprocessor require that this be declared right here */ 27 #define CREATE_TRACE_POINTS 28 29 #include "dm_services_types.h" 30 #include "dc.h" 31 #include "link_enc_cfg.h" 32 #include "dc/inc/core_types.h" 33 #include "dal_asic_id.h" 34 #include "dmub/dmub_srv.h" 35 #include "dc/inc/hw/dmcu.h" 36 #include "dc/inc/hw/abm.h" 37 #include "dc/dc_dmub_srv.h" 38 #include "dc/dc_edid_parser.h" 39 #include "dc/dc_stat.h" 40 #include "dc/dc_state.h" 41 #include "amdgpu_dm_trace.h" 42 #include "dpcd_defs.h" 43 #include "link/protocols/link_dpcd.h" 44 #include "link_service_types.h" 45 #include "link/protocols/link_dp_capability.h" 46 #include "link/protocols/link_ddc.h" 47 48 #include "vid.h" 49 #include "amdgpu.h" 50 #include "amdgpu_display.h" 51 #include "amdgpu_ucode.h" 52 #include "atom.h" 53 #include "amdgpu_dm.h" 54 #include "amdgpu_dm_plane.h" 55 #include "amdgpu_dm_crtc.h" 56 #include "amdgpu_dm_hdcp.h" 57 #include <drm/display/drm_hdcp_helper.h> 58 #include "amdgpu_dm_wb.h" 59 #include "amdgpu_pm.h" 60 #include "amdgpu_atombios.h" 61 62 #include "amd_shared.h" 63 #include "amdgpu_dm_irq.h" 64 #include "dm_helpers.h" 65 #include "amdgpu_dm_mst_types.h" 66 #if defined(CONFIG_DEBUG_FS) 67 #include "amdgpu_dm_debugfs.h" 68 #endif 69 #include "amdgpu_dm_psr.h" 70 #include "amdgpu_dm_replay.h" 71 72 #include "ivsrcid/ivsrcid_vislands30.h" 73 74 #include <linux/backlight.h> 75 #include <linux/module.h> 76 #include <linux/moduleparam.h> 77 #include <linux/types.h> 78 #include <linux/pm_runtime.h> 79 #include <linux/pci.h> 80 #include <linux/power_supply.h> 81 #include <linux/firmware.h> 82 #include <linux/component.h> 83 #include <linux/dmi.h> 84 #include <linux/sort.h> 85 86 #include <drm/display/drm_dp_mst_helper.h> 87 #include <drm/display/drm_hdmi_helper.h> 88 #include <drm/drm_atomic.h> 89 #include <drm/drm_atomic_uapi.h> 90 #include <drm/drm_atomic_helper.h> 91 #include <drm/drm_blend.h> 92 #include <drm/drm_fixed.h> 93 #include <drm/drm_fourcc.h> 94 #include <drm/drm_edid.h> 95 #include <drm/drm_eld.h> 96 #include <drm/drm_vblank.h> 97 #include <drm/drm_audio_component.h> 98 #include <drm/drm_gem_atomic_helper.h> 99 100 #include <acpi/video.h> 101 102 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" 103 104 #include "dcn/dcn_1_0_offset.h" 105 #include "dcn/dcn_1_0_sh_mask.h" 106 #include "soc15_hw_ip.h" 107 #include "soc15_common.h" 108 #include "vega10_ip_offset.h" 109 110 #include "gc/gc_11_0_0_offset.h" 111 #include "gc/gc_11_0_0_sh_mask.h" 112 113 #include "modules/inc/mod_freesync.h" 114 #include "modules/power/power_helpers.h" 115 116 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" 117 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); 118 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" 119 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB); 120 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin" 121 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB); 122 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin" 123 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB); 124 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin" 125 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB); 126 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin" 127 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB); 128 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin" 129 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB); 130 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin" 131 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB); 132 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin" 133 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB); 134 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin" 135 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB); 136 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin" 137 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB); 138 139 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin" 140 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB); 141 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin" 142 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB); 143 144 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" 145 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); 146 147 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" 148 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); 149 150 #define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin" 151 MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB); 152 153 #define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin" 154 MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB); 155 156 #define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin" 157 MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB); 158 159 /* Number of bytes in PSP header for firmware. */ 160 #define PSP_HEADER_BYTES 0x100 161 162 /* Number of bytes in PSP footer for firmware. */ 163 #define PSP_FOOTER_BYTES 0x100 164 165 /** 166 * DOC: overview 167 * 168 * The AMDgpu display manager, **amdgpu_dm** (or even simpler, 169 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM 170 * requests into DC requests, and DC responses into DRM responses. 171 * 172 * The root control structure is &struct amdgpu_display_manager. 173 */ 174 175 /* basic init/fini API */ 176 static int amdgpu_dm_init(struct amdgpu_device *adev); 177 static void amdgpu_dm_fini(struct amdgpu_device *adev); 178 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector); 179 static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state); 180 181 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link) 182 { 183 switch (link->dpcd_caps.dongle_type) { 184 case DISPLAY_DONGLE_NONE: 185 return DRM_MODE_SUBCONNECTOR_Native; 186 case DISPLAY_DONGLE_DP_VGA_CONVERTER: 187 return DRM_MODE_SUBCONNECTOR_VGA; 188 case DISPLAY_DONGLE_DP_DVI_CONVERTER: 189 case DISPLAY_DONGLE_DP_DVI_DONGLE: 190 return DRM_MODE_SUBCONNECTOR_DVID; 191 case DISPLAY_DONGLE_DP_HDMI_CONVERTER: 192 case DISPLAY_DONGLE_DP_HDMI_DONGLE: 193 return DRM_MODE_SUBCONNECTOR_HDMIA; 194 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE: 195 default: 196 return DRM_MODE_SUBCONNECTOR_Unknown; 197 } 198 } 199 200 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector) 201 { 202 struct dc_link *link = aconnector->dc_link; 203 struct drm_connector *connector = &aconnector->base; 204 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown; 205 206 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 207 return; 208 209 if (aconnector->dc_sink) 210 subconnector = get_subconnector_type(link); 211 212 drm_object_property_set_value(&connector->base, 213 connector->dev->mode_config.dp_subconnector_property, 214 subconnector); 215 } 216 217 /* 218 * initializes drm_device display related structures, based on the information 219 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector, 220 * drm_encoder, drm_mode_config 221 * 222 * Returns 0 on success 223 */ 224 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev); 225 /* removes and deallocates the drm structures, created by the above function */ 226 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm); 227 228 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 229 struct amdgpu_dm_connector *amdgpu_dm_connector, 230 u32 link_index, 231 struct amdgpu_encoder *amdgpu_encoder); 232 static int amdgpu_dm_encoder_init(struct drm_device *dev, 233 struct amdgpu_encoder *aencoder, 234 uint32_t link_index); 235 236 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector); 237 238 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state); 239 240 static int amdgpu_dm_atomic_check(struct drm_device *dev, 241 struct drm_atomic_state *state); 242 243 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); 244 static void handle_hpd_rx_irq(void *param); 245 246 static bool 247 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, 248 struct drm_crtc_state *new_crtc_state); 249 /* 250 * dm_vblank_get_counter 251 * 252 * @brief 253 * Get counter for number of vertical blanks 254 * 255 * @param 256 * struct amdgpu_device *adev - [in] desired amdgpu device 257 * int disp_idx - [in] which CRTC to get the counter from 258 * 259 * @return 260 * Counter for vertical blanks 261 */ 262 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) 263 { 264 struct amdgpu_crtc *acrtc = NULL; 265 266 if (crtc >= adev->mode_info.num_crtc) 267 return 0; 268 269 acrtc = adev->mode_info.crtcs[crtc]; 270 271 if (!acrtc->dm_irq_params.stream) { 272 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", 273 crtc); 274 return 0; 275 } 276 277 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream); 278 } 279 280 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, 281 u32 *vbl, u32 *position) 282 { 283 u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0; 284 struct amdgpu_crtc *acrtc = NULL; 285 struct dc *dc = adev->dm.dc; 286 287 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) 288 return -EINVAL; 289 290 acrtc = adev->mode_info.crtcs[crtc]; 291 292 if (!acrtc->dm_irq_params.stream) { 293 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", 294 crtc); 295 return 0; 296 } 297 298 if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed) 299 dc_allow_idle_optimizations(dc, false); 300 301 /* 302 * TODO rework base driver to use values directly. 303 * for now parse it back into reg-format 304 */ 305 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream, 306 &v_blank_start, 307 &v_blank_end, 308 &h_position, 309 &v_position); 310 311 *position = v_position | (h_position << 16); 312 *vbl = v_blank_start | (v_blank_end << 16); 313 314 return 0; 315 } 316 317 static bool dm_is_idle(void *handle) 318 { 319 /* XXX todo */ 320 return true; 321 } 322 323 static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block) 324 { 325 /* XXX todo */ 326 return 0; 327 } 328 329 static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block) 330 { 331 return false; 332 } 333 334 static int dm_soft_reset(struct amdgpu_ip_block *ip_block) 335 { 336 /* XXX todo */ 337 return 0; 338 } 339 340 static struct amdgpu_crtc * 341 get_crtc_by_otg_inst(struct amdgpu_device *adev, 342 int otg_inst) 343 { 344 struct drm_device *dev = adev_to_drm(adev); 345 struct drm_crtc *crtc; 346 struct amdgpu_crtc *amdgpu_crtc; 347 348 if (WARN_ON(otg_inst == -1)) 349 return adev->mode_info.crtcs[0]; 350 351 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 352 amdgpu_crtc = to_amdgpu_crtc(crtc); 353 354 if (amdgpu_crtc->otg_inst == otg_inst) 355 return amdgpu_crtc; 356 } 357 358 return NULL; 359 } 360 361 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, 362 struct dm_crtc_state *new_state) 363 { 364 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) 365 return true; 366 else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state)) 367 return true; 368 else 369 return false; 370 } 371 372 /* 373 * DC will program planes with their z-order determined by their ordering 374 * in the dc_surface_updates array. This comparator is used to sort them 375 * by descending zpos. 376 */ 377 static int dm_plane_layer_index_cmp(const void *a, const void *b) 378 { 379 const struct dc_surface_update *sa = (struct dc_surface_update *)a; 380 const struct dc_surface_update *sb = (struct dc_surface_update *)b; 381 382 /* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */ 383 return sb->surface->layer_index - sa->surface->layer_index; 384 } 385 386 /** 387 * update_planes_and_stream_adapter() - Send planes to be updated in DC 388 * 389 * DC has a generic way to update planes and stream via 390 * dc_update_planes_and_stream function; however, DM might need some 391 * adjustments and preparation before calling it. This function is a wrapper 392 * for the dc_update_planes_and_stream that does any required configuration 393 * before passing control to DC. 394 * 395 * @dc: Display Core control structure 396 * @update_type: specify whether it is FULL/MEDIUM/FAST update 397 * @planes_count: planes count to update 398 * @stream: stream state 399 * @stream_update: stream update 400 * @array_of_surface_update: dc surface update pointer 401 * 402 */ 403 static inline bool update_planes_and_stream_adapter(struct dc *dc, 404 int update_type, 405 int planes_count, 406 struct dc_stream_state *stream, 407 struct dc_stream_update *stream_update, 408 struct dc_surface_update *array_of_surface_update) 409 { 410 sort(array_of_surface_update, planes_count, 411 sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL); 412 413 /* 414 * Previous frame finished and HW is ready for optimization. 415 */ 416 if (update_type == UPDATE_TYPE_FAST) 417 dc_post_update_surfaces_to_stream(dc); 418 419 return dc_update_planes_and_stream(dc, 420 array_of_surface_update, 421 planes_count, 422 stream, 423 stream_update); 424 } 425 426 /** 427 * dm_pflip_high_irq() - Handle pageflip interrupt 428 * @interrupt_params: ignored 429 * 430 * Handles the pageflip interrupt by notifying all interested parties 431 * that the pageflip has been completed. 432 */ 433 static void dm_pflip_high_irq(void *interrupt_params) 434 { 435 struct amdgpu_crtc *amdgpu_crtc; 436 struct common_irq_params *irq_params = interrupt_params; 437 struct amdgpu_device *adev = irq_params->adev; 438 struct drm_device *dev = adev_to_drm(adev); 439 unsigned long flags; 440 struct drm_pending_vblank_event *e; 441 u32 vpos, hpos, v_blank_start, v_blank_end; 442 bool vrr_active; 443 444 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); 445 446 /* IRQ could occur when in initial stage */ 447 /* TODO work and BO cleanup */ 448 if (amdgpu_crtc == NULL) { 449 drm_dbg_state(dev, "CRTC is null, returning.\n"); 450 return; 451 } 452 453 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 454 455 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { 456 drm_dbg_state(dev, 457 "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n", 458 amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED, 459 amdgpu_crtc->crtc_id, amdgpu_crtc); 460 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 461 return; 462 } 463 464 /* page flip completed. */ 465 e = amdgpu_crtc->event; 466 amdgpu_crtc->event = NULL; 467 468 WARN_ON(!e); 469 470 vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc); 471 472 /* Fixed refresh rate, or VRR scanout position outside front-porch? */ 473 if (!vrr_active || 474 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start, 475 &v_blank_end, &hpos, &vpos) || 476 (vpos < v_blank_start)) { 477 /* Update to correct count and vblank timestamp if racing with 478 * vblank irq. This also updates to the correct vblank timestamp 479 * even in VRR mode, as scanout is past the front-porch atm. 480 */ 481 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); 482 483 /* Wake up userspace by sending the pageflip event with proper 484 * count and timestamp of vblank of flip completion. 485 */ 486 if (e) { 487 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e); 488 489 /* Event sent, so done with vblank for this flip */ 490 drm_crtc_vblank_put(&amdgpu_crtc->base); 491 } 492 } else if (e) { 493 /* VRR active and inside front-porch: vblank count and 494 * timestamp for pageflip event will only be up to date after 495 * drm_crtc_handle_vblank() has been executed from late vblank 496 * irq handler after start of back-porch (vline 0). We queue the 497 * pageflip event for send-out by drm_crtc_handle_vblank() with 498 * updated timestamp and count, once it runs after us. 499 * 500 * We need to open-code this instead of using the helper 501 * drm_crtc_arm_vblank_event(), as that helper would 502 * call drm_crtc_accurate_vblank_count(), which we must 503 * not call in VRR mode while we are in front-porch! 504 */ 505 506 /* sequence will be replaced by real count during send-out. */ 507 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base); 508 e->pipe = amdgpu_crtc->crtc_id; 509 510 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list); 511 e = NULL; 512 } 513 514 /* Keep track of vblank of this flip for flip throttling. We use the 515 * cooked hw counter, as that one incremented at start of this vblank 516 * of pageflip completion, so last_flip_vblank is the forbidden count 517 * for queueing new pageflips if vsync + VRR is enabled. 518 */ 519 amdgpu_crtc->dm_irq_params.last_flip_vblank = 520 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base); 521 522 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; 523 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 524 525 drm_dbg_state(dev, 526 "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", 527 amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e); 528 } 529 530 static void dm_vupdate_high_irq(void *interrupt_params) 531 { 532 struct common_irq_params *irq_params = interrupt_params; 533 struct amdgpu_device *adev = irq_params->adev; 534 struct amdgpu_crtc *acrtc; 535 struct drm_device *drm_dev; 536 struct drm_vblank_crtc *vblank; 537 ktime_t frame_duration_ns, previous_timestamp; 538 unsigned long flags; 539 int vrr_active; 540 541 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE); 542 543 if (acrtc) { 544 vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); 545 drm_dev = acrtc->base.dev; 546 vblank = drm_crtc_vblank_crtc(&acrtc->base); 547 previous_timestamp = atomic64_read(&irq_params->previous_timestamp); 548 frame_duration_ns = vblank->time - previous_timestamp; 549 550 if (frame_duration_ns > 0) { 551 trace_amdgpu_refresh_rate_track(acrtc->base.index, 552 frame_duration_ns, 553 ktime_divns(NSEC_PER_SEC, frame_duration_ns)); 554 atomic64_set(&irq_params->previous_timestamp, vblank->time); 555 } 556 557 drm_dbg_vbl(drm_dev, 558 "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, 559 vrr_active); 560 561 /* Core vblank handling is done here after end of front-porch in 562 * vrr mode, as vblank timestamping will give valid results 563 * while now done after front-porch. This will also deliver 564 * page-flip completion events that have been queued to us 565 * if a pageflip happened inside front-porch. 566 */ 567 if (vrr_active) { 568 amdgpu_dm_crtc_handle_vblank(acrtc); 569 570 /* BTR processing for pre-DCE12 ASICs */ 571 if (acrtc->dm_irq_params.stream && 572 adev->family < AMDGPU_FAMILY_AI) { 573 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 574 mod_freesync_handle_v_update( 575 adev->dm.freesync_module, 576 acrtc->dm_irq_params.stream, 577 &acrtc->dm_irq_params.vrr_params); 578 579 dc_stream_adjust_vmin_vmax( 580 adev->dm.dc, 581 acrtc->dm_irq_params.stream, 582 &acrtc->dm_irq_params.vrr_params.adjust); 583 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 584 } 585 } 586 } 587 } 588 589 /** 590 * dm_crtc_high_irq() - Handles CRTC interrupt 591 * @interrupt_params: used for determining the CRTC instance 592 * 593 * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK 594 * event handler. 595 */ 596 static void dm_crtc_high_irq(void *interrupt_params) 597 { 598 struct common_irq_params *irq_params = interrupt_params; 599 struct amdgpu_device *adev = irq_params->adev; 600 struct drm_writeback_job *job; 601 struct amdgpu_crtc *acrtc; 602 unsigned long flags; 603 int vrr_active; 604 605 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); 606 if (!acrtc) 607 return; 608 609 if (acrtc->wb_conn) { 610 spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags); 611 612 if (acrtc->wb_pending) { 613 job = list_first_entry_or_null(&acrtc->wb_conn->job_queue, 614 struct drm_writeback_job, 615 list_entry); 616 acrtc->wb_pending = false; 617 spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags); 618 619 if (job) { 620 unsigned int v_total, refresh_hz; 621 struct dc_stream_state *stream = acrtc->dm_irq_params.stream; 622 623 v_total = stream->adjust.v_total_max ? 624 stream->adjust.v_total_max : stream->timing.v_total; 625 refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz * 626 100LL, (v_total * stream->timing.h_total)); 627 mdelay(1000 / refresh_hz); 628 629 drm_writeback_signal_completion(acrtc->wb_conn, 0); 630 dc_stream_fc_disable_writeback(adev->dm.dc, 631 acrtc->dm_irq_params.stream, 0); 632 } 633 } else 634 spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags); 635 } 636 637 vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); 638 639 drm_dbg_vbl(adev_to_drm(adev), 640 "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, 641 vrr_active, acrtc->dm_irq_params.active_planes); 642 643 /** 644 * Core vblank handling at start of front-porch is only possible 645 * in non-vrr mode, as only there vblank timestamping will give 646 * valid results while done in front-porch. Otherwise defer it 647 * to dm_vupdate_high_irq after end of front-porch. 648 */ 649 if (!vrr_active) 650 amdgpu_dm_crtc_handle_vblank(acrtc); 651 652 /** 653 * Following stuff must happen at start of vblank, for crc 654 * computation and below-the-range btr support in vrr mode. 655 */ 656 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); 657 658 /* BTR updates need to happen before VUPDATE on Vega and above. */ 659 if (adev->family < AMDGPU_FAMILY_AI) 660 return; 661 662 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 663 664 if (acrtc->dm_irq_params.stream && 665 acrtc->dm_irq_params.vrr_params.supported && 666 acrtc->dm_irq_params.freesync_config.state == 667 VRR_STATE_ACTIVE_VARIABLE) { 668 mod_freesync_handle_v_update(adev->dm.freesync_module, 669 acrtc->dm_irq_params.stream, 670 &acrtc->dm_irq_params.vrr_params); 671 672 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream, 673 &acrtc->dm_irq_params.vrr_params.adjust); 674 } 675 676 /* 677 * If there aren't any active_planes then DCH HUBP may be clock-gated. 678 * In that case, pageflip completion interrupts won't fire and pageflip 679 * completion events won't get delivered. Prevent this by sending 680 * pending pageflip events from here if a flip is still pending. 681 * 682 * If any planes are enabled, use dm_pflip_high_irq() instead, to 683 * avoid race conditions between flip programming and completion, 684 * which could cause too early flip completion events. 685 */ 686 if (adev->family >= AMDGPU_FAMILY_RV && 687 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED && 688 acrtc->dm_irq_params.active_planes == 0) { 689 if (acrtc->event) { 690 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event); 691 acrtc->event = NULL; 692 drm_crtc_vblank_put(&acrtc->base); 693 } 694 acrtc->pflip_status = AMDGPU_FLIP_NONE; 695 } 696 697 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 698 } 699 700 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 701 /** 702 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for 703 * DCN generation ASICs 704 * @interrupt_params: interrupt parameters 705 * 706 * Used to set crc window/read out crc value at vertical line 0 position 707 */ 708 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) 709 { 710 struct common_irq_params *irq_params = interrupt_params; 711 struct amdgpu_device *adev = irq_params->adev; 712 struct amdgpu_crtc *acrtc; 713 714 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0); 715 716 if (!acrtc) 717 return; 718 719 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base); 720 } 721 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ 722 723 /** 724 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command. 725 * @adev: amdgpu_device pointer 726 * @notify: dmub notification structure 727 * 728 * Dmub AUX or SET_CONFIG command completion processing callback 729 * Copies dmub notification to DM which is to be read by AUX command. 730 * issuing thread and also signals the event to wake up the thread. 731 */ 732 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev, 733 struct dmub_notification *notify) 734 { 735 if (adev->dm.dmub_notify) 736 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); 737 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY) 738 complete(&adev->dm.dmub_aux_transfer_done); 739 } 740 741 /** 742 * dmub_hpd_callback - DMUB HPD interrupt processing callback. 743 * @adev: amdgpu_device pointer 744 * @notify: dmub notification structure 745 * 746 * Dmub Hpd interrupt processing callback. Gets displayindex through the 747 * ink index and calls helper to do the processing. 748 */ 749 static void dmub_hpd_callback(struct amdgpu_device *adev, 750 struct dmub_notification *notify) 751 { 752 struct amdgpu_dm_connector *aconnector; 753 struct amdgpu_dm_connector *hpd_aconnector = NULL; 754 struct drm_connector *connector; 755 struct drm_connector_list_iter iter; 756 struct dc_link *link; 757 u8 link_index = 0; 758 struct drm_device *dev; 759 760 if (adev == NULL) 761 return; 762 763 if (notify == NULL) { 764 DRM_ERROR("DMUB HPD callback notification was NULL"); 765 return; 766 } 767 768 if (notify->link_index > adev->dm.dc->link_count) { 769 DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index); 770 return; 771 } 772 773 /* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */ 774 if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) { 775 DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n"); 776 return; 777 } 778 779 link_index = notify->link_index; 780 link = adev->dm.dc->links[link_index]; 781 dev = adev->dm.ddev; 782 783 drm_connector_list_iter_begin(dev, &iter); 784 drm_for_each_connector_iter(connector, &iter) { 785 786 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 787 continue; 788 789 aconnector = to_amdgpu_dm_connector(connector); 790 if (link && aconnector->dc_link == link) { 791 if (notify->type == DMUB_NOTIFICATION_HPD) 792 DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index); 793 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) 794 DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index); 795 else 796 DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n", 797 notify->type, link_index); 798 799 hpd_aconnector = aconnector; 800 break; 801 } 802 } 803 drm_connector_list_iter_end(&iter); 804 805 if (hpd_aconnector) { 806 if (notify->type == DMUB_NOTIFICATION_HPD) { 807 if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG)) 808 DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index); 809 handle_hpd_irq_helper(hpd_aconnector); 810 } else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) { 811 handle_hpd_rx_irq(hpd_aconnector); 812 } 813 } 814 } 815 816 /** 817 * dmub_hpd_sense_callback - DMUB HPD sense processing callback. 818 * @adev: amdgpu_device pointer 819 * @notify: dmub notification structure 820 * 821 * HPD sense changes can occur during low power states and need to be 822 * notified from firmware to driver. 823 */ 824 static void dmub_hpd_sense_callback(struct amdgpu_device *adev, 825 struct dmub_notification *notify) 826 { 827 DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n"); 828 } 829 830 /** 831 * register_dmub_notify_callback - Sets callback for DMUB notify 832 * @adev: amdgpu_device pointer 833 * @type: Type of dmub notification 834 * @callback: Dmub interrupt callback function 835 * @dmub_int_thread_offload: offload indicator 836 * 837 * API to register a dmub callback handler for a dmub notification 838 * Also sets indicator whether callback processing to be offloaded. 839 * to dmub interrupt handling thread 840 * Return: true if successfully registered, false if there is existing registration 841 */ 842 static bool register_dmub_notify_callback(struct amdgpu_device *adev, 843 enum dmub_notification_type type, 844 dmub_notify_interrupt_callback_t callback, 845 bool dmub_int_thread_offload) 846 { 847 if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) { 848 adev->dm.dmub_callback[type] = callback; 849 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload; 850 } else 851 return false; 852 853 return true; 854 } 855 856 static void dm_handle_hpd_work(struct work_struct *work) 857 { 858 struct dmub_hpd_work *dmub_hpd_wrk; 859 860 dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work); 861 862 if (!dmub_hpd_wrk->dmub_notify) { 863 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL"); 864 return; 865 } 866 867 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) { 868 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev, 869 dmub_hpd_wrk->dmub_notify); 870 } 871 872 kfree(dmub_hpd_wrk->dmub_notify); 873 kfree(dmub_hpd_wrk); 874 875 } 876 877 #define DMUB_TRACE_MAX_READ 64 878 /** 879 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt 880 * @interrupt_params: used for determining the Outbox instance 881 * 882 * Handles the Outbox Interrupt 883 * event handler. 884 */ 885 static void dm_dmub_outbox1_low_irq(void *interrupt_params) 886 { 887 struct dmub_notification notify = {0}; 888 struct common_irq_params *irq_params = interrupt_params; 889 struct amdgpu_device *adev = irq_params->adev; 890 struct amdgpu_display_manager *dm = &adev->dm; 891 struct dmcub_trace_buf_entry entry = { 0 }; 892 u32 count = 0; 893 struct dmub_hpd_work *dmub_hpd_wrk; 894 static const char *const event_type[] = { 895 "NO_DATA", 896 "AUX_REPLY", 897 "HPD", 898 "HPD_IRQ", 899 "SET_CONFIGC_REPLY", 900 "DPIA_NOTIFICATION", 901 "HPD_SENSE_NOTIFY", 902 }; 903 904 do { 905 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { 906 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, 907 entry.param0, entry.param1); 908 909 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", 910 entry.trace_code, entry.tick_count, entry.param0, entry.param1); 911 } else 912 break; 913 914 count++; 915 916 } while (count <= DMUB_TRACE_MAX_READ); 917 918 if (count > DMUB_TRACE_MAX_READ) 919 DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); 920 921 if (dc_enable_dmub_notifications(adev->dm.dc) && 922 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { 923 924 do { 925 dc_stat_get_dmub_notification(adev->dm.dc, ¬ify); 926 if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) { 927 DRM_ERROR("DM: notify type %d invalid!", notify.type); 928 continue; 929 } 930 if (!dm->dmub_callback[notify.type]) { 931 DRM_WARN("DMUB notification skipped due to no handler: type=%s\n", 932 event_type[notify.type]); 933 continue; 934 } 935 if (dm->dmub_thread_offload[notify.type] == true) { 936 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); 937 if (!dmub_hpd_wrk) { 938 DRM_ERROR("Failed to allocate dmub_hpd_wrk"); 939 return; 940 } 941 dmub_hpd_wrk->dmub_notify = kmemdup(¬ify, sizeof(struct dmub_notification), 942 GFP_ATOMIC); 943 if (!dmub_hpd_wrk->dmub_notify) { 944 kfree(dmub_hpd_wrk); 945 DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify"); 946 return; 947 } 948 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); 949 dmub_hpd_wrk->adev = adev; 950 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); 951 } else { 952 dm->dmub_callback[notify.type](adev, ¬ify); 953 } 954 } while (notify.pending_notification); 955 } 956 } 957 958 static int dm_set_clockgating_state(struct amdgpu_ip_block *ip_block, 959 enum amd_clockgating_state state) 960 { 961 return 0; 962 } 963 964 static int dm_set_powergating_state(struct amdgpu_ip_block *ip_block, 965 enum amd_powergating_state state) 966 { 967 return 0; 968 } 969 970 /* Prototypes of private functions */ 971 static int dm_early_init(struct amdgpu_ip_block *ip_block); 972 973 /* Allocate memory for FBC compressed data */ 974 static void amdgpu_dm_fbc_init(struct drm_connector *connector) 975 { 976 struct amdgpu_device *adev = drm_to_adev(connector->dev); 977 struct dm_compressor_info *compressor = &adev->dm.compressor; 978 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector); 979 struct drm_display_mode *mode; 980 unsigned long max_size = 0; 981 982 if (adev->dm.dc->fbc_compressor == NULL) 983 return; 984 985 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP) 986 return; 987 988 if (compressor->bo_ptr) 989 return; 990 991 992 list_for_each_entry(mode, &connector->modes, head) { 993 if (max_size < (unsigned long) mode->htotal * mode->vtotal) 994 max_size = (unsigned long) mode->htotal * mode->vtotal; 995 } 996 997 if (max_size) { 998 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE, 999 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr, 1000 &compressor->gpu_addr, &compressor->cpu_addr); 1001 1002 if (r) 1003 DRM_ERROR("DM: Failed to initialize FBC\n"); 1004 else { 1005 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr; 1006 DRM_INFO("DM: FBC alloc %lu\n", max_size*4); 1007 } 1008 1009 } 1010 1011 } 1012 1013 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port, 1014 int pipe, bool *enabled, 1015 unsigned char *buf, int max_bytes) 1016 { 1017 struct drm_device *dev = dev_get_drvdata(kdev); 1018 struct amdgpu_device *adev = drm_to_adev(dev); 1019 struct drm_connector *connector; 1020 struct drm_connector_list_iter conn_iter; 1021 struct amdgpu_dm_connector *aconnector; 1022 int ret = 0; 1023 1024 *enabled = false; 1025 1026 mutex_lock(&adev->dm.audio_lock); 1027 1028 drm_connector_list_iter_begin(dev, &conn_iter); 1029 drm_for_each_connector_iter(connector, &conn_iter) { 1030 1031 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 1032 continue; 1033 1034 aconnector = to_amdgpu_dm_connector(connector); 1035 if (aconnector->audio_inst != port) 1036 continue; 1037 1038 *enabled = true; 1039 ret = drm_eld_size(connector->eld); 1040 memcpy(buf, connector->eld, min(max_bytes, ret)); 1041 1042 break; 1043 } 1044 drm_connector_list_iter_end(&conn_iter); 1045 1046 mutex_unlock(&adev->dm.audio_lock); 1047 1048 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled); 1049 1050 return ret; 1051 } 1052 1053 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = { 1054 .get_eld = amdgpu_dm_audio_component_get_eld, 1055 }; 1056 1057 static int amdgpu_dm_audio_component_bind(struct device *kdev, 1058 struct device *hda_kdev, void *data) 1059 { 1060 struct drm_device *dev = dev_get_drvdata(kdev); 1061 struct amdgpu_device *adev = drm_to_adev(dev); 1062 struct drm_audio_component *acomp = data; 1063 1064 acomp->ops = &amdgpu_dm_audio_component_ops; 1065 acomp->dev = kdev; 1066 adev->dm.audio_component = acomp; 1067 1068 return 0; 1069 } 1070 1071 static void amdgpu_dm_audio_component_unbind(struct device *kdev, 1072 struct device *hda_kdev, void *data) 1073 { 1074 struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev)); 1075 struct drm_audio_component *acomp = data; 1076 1077 acomp->ops = NULL; 1078 acomp->dev = NULL; 1079 adev->dm.audio_component = NULL; 1080 } 1081 1082 static const struct component_ops amdgpu_dm_audio_component_bind_ops = { 1083 .bind = amdgpu_dm_audio_component_bind, 1084 .unbind = amdgpu_dm_audio_component_unbind, 1085 }; 1086 1087 static int amdgpu_dm_audio_init(struct amdgpu_device *adev) 1088 { 1089 int i, ret; 1090 1091 if (!amdgpu_audio) 1092 return 0; 1093 1094 adev->mode_info.audio.enabled = true; 1095 1096 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count; 1097 1098 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 1099 adev->mode_info.audio.pin[i].channels = -1; 1100 adev->mode_info.audio.pin[i].rate = -1; 1101 adev->mode_info.audio.pin[i].bits_per_sample = -1; 1102 adev->mode_info.audio.pin[i].status_bits = 0; 1103 adev->mode_info.audio.pin[i].category_code = 0; 1104 adev->mode_info.audio.pin[i].connected = false; 1105 adev->mode_info.audio.pin[i].id = 1106 adev->dm.dc->res_pool->audios[i]->inst; 1107 adev->mode_info.audio.pin[i].offset = 0; 1108 } 1109 1110 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops); 1111 if (ret < 0) 1112 return ret; 1113 1114 adev->dm.audio_registered = true; 1115 1116 return 0; 1117 } 1118 1119 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev) 1120 { 1121 if (!amdgpu_audio) 1122 return; 1123 1124 if (!adev->mode_info.audio.enabled) 1125 return; 1126 1127 if (adev->dm.audio_registered) { 1128 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops); 1129 adev->dm.audio_registered = false; 1130 } 1131 1132 /* TODO: Disable audio? */ 1133 1134 adev->mode_info.audio.enabled = false; 1135 } 1136 1137 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) 1138 { 1139 struct drm_audio_component *acomp = adev->dm.audio_component; 1140 1141 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) { 1142 DRM_DEBUG_KMS("Notify ELD: %d\n", pin); 1143 1144 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, 1145 pin, -1); 1146 } 1147 } 1148 1149 static int dm_dmub_hw_init(struct amdgpu_device *adev) 1150 { 1151 const struct dmcub_firmware_header_v1_0 *hdr; 1152 struct dmub_srv *dmub_srv = adev->dm.dmub_srv; 1153 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; 1154 const struct firmware *dmub_fw = adev->dm.dmub_fw; 1155 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; 1156 struct abm *abm = adev->dm.dc->res_pool->abm; 1157 struct dc_context *ctx = adev->dm.dc->ctx; 1158 struct dmub_srv_hw_params hw_params; 1159 enum dmub_status status; 1160 const unsigned char *fw_inst_const, *fw_bss_data; 1161 u32 i, fw_inst_const_size, fw_bss_data_size; 1162 bool has_hw_support; 1163 1164 if (!dmub_srv) 1165 /* DMUB isn't supported on the ASIC. */ 1166 return 0; 1167 1168 if (!fb_info) { 1169 DRM_ERROR("No framebuffer info for DMUB service.\n"); 1170 return -EINVAL; 1171 } 1172 1173 if (!dmub_fw) { 1174 /* Firmware required for DMUB support. */ 1175 DRM_ERROR("No firmware provided for DMUB.\n"); 1176 return -EINVAL; 1177 } 1178 1179 /* initialize register offsets for ASICs with runtime initialization available */ 1180 if (dmub_srv->hw_funcs.init_reg_offsets) 1181 dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx); 1182 1183 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); 1184 if (status != DMUB_STATUS_OK) { 1185 DRM_ERROR("Error checking HW support for DMUB: %d\n", status); 1186 return -EINVAL; 1187 } 1188 1189 if (!has_hw_support) { 1190 DRM_INFO("DMUB unsupported on ASIC\n"); 1191 return 0; 1192 } 1193 1194 /* Reset DMCUB if it was previously running - before we overwrite its memory. */ 1195 status = dmub_srv_hw_reset(dmub_srv); 1196 if (status != DMUB_STATUS_OK) 1197 DRM_WARN("Error resetting DMUB HW: %d\n", status); 1198 1199 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; 1200 1201 fw_inst_const = dmub_fw->data + 1202 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 1203 PSP_HEADER_BYTES; 1204 1205 fw_bss_data = dmub_fw->data + 1206 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 1207 le32_to_cpu(hdr->inst_const_bytes); 1208 1209 /* Copy firmware and bios info into FB memory. */ 1210 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - 1211 PSP_HEADER_BYTES - PSP_FOOTER_BYTES; 1212 1213 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes); 1214 1215 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP, 1216 * amdgpu_ucode_init_single_fw will load dmub firmware 1217 * fw_inst_const part to cw0; otherwise, the firmware back door load 1218 * will be done by dm_dmub_hw_init 1219 */ 1220 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1221 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, 1222 fw_inst_const_size); 1223 } 1224 1225 if (fw_bss_data_size) 1226 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, 1227 fw_bss_data, fw_bss_data_size); 1228 1229 /* Copy firmware bios info into FB memory. */ 1230 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios, 1231 adev->bios_size); 1232 1233 /* Reset regions that need to be reset. */ 1234 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0, 1235 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size); 1236 1237 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0, 1238 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size); 1239 1240 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0, 1241 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size); 1242 1243 memset(fb_info->fb[DMUB_WINDOW_SHARED_STATE].cpu_addr, 0, 1244 fb_info->fb[DMUB_WINDOW_SHARED_STATE].size); 1245 1246 /* Initialize hardware. */ 1247 memset(&hw_params, 0, sizeof(hw_params)); 1248 hw_params.fb_base = adev->gmc.fb_start; 1249 hw_params.fb_offset = adev->vm_manager.vram_base_offset; 1250 1251 /* backdoor load firmware and trigger dmub running */ 1252 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 1253 hw_params.load_inst_const = true; 1254 1255 if (dmcu) 1256 hw_params.psp_version = dmcu->psp_version; 1257 1258 for (i = 0; i < fb_info->num_fb; ++i) 1259 hw_params.fb[i] = &fb_info->fb[i]; 1260 1261 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1262 case IP_VERSION(3, 1, 3): 1263 case IP_VERSION(3, 1, 4): 1264 case IP_VERSION(3, 5, 0): 1265 case IP_VERSION(3, 5, 1): 1266 case IP_VERSION(4, 0, 1): 1267 hw_params.dpia_supported = true; 1268 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; 1269 break; 1270 default: 1271 break; 1272 } 1273 1274 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1275 case IP_VERSION(3, 5, 0): 1276 case IP_VERSION(3, 5, 1): 1277 hw_params.ips_sequential_ono = adev->external_rev_id > 0x10; 1278 break; 1279 default: 1280 break; 1281 } 1282 1283 status = dmub_srv_hw_init(dmub_srv, &hw_params); 1284 if (status != DMUB_STATUS_OK) { 1285 DRM_ERROR("Error initializing DMUB HW: %d\n", status); 1286 return -EINVAL; 1287 } 1288 1289 /* Wait for firmware load to finish. */ 1290 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); 1291 if (status != DMUB_STATUS_OK) 1292 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); 1293 1294 /* Init DMCU and ABM if available. */ 1295 if (dmcu && abm) { 1296 dmcu->funcs->dmcu_init(dmcu); 1297 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu); 1298 } 1299 1300 if (!adev->dm.dc->ctx->dmub_srv) 1301 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); 1302 if (!adev->dm.dc->ctx->dmub_srv) { 1303 DRM_ERROR("Couldn't allocate DC DMUB server!\n"); 1304 return -ENOMEM; 1305 } 1306 1307 DRM_INFO("DMUB hardware initialized: version=0x%08X\n", 1308 adev->dm.dmcub_fw_version); 1309 1310 /* Keeping sanity checks off if 1311 * DCN31 >= 4.0.59.0 1312 * DCN314 >= 8.0.16.0 1313 * Otherwise, turn on sanity checks 1314 */ 1315 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1316 case IP_VERSION(3, 1, 2): 1317 case IP_VERSION(3, 1, 3): 1318 if (adev->dm.dmcub_fw_version && 1319 adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) && 1320 adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59)) 1321 adev->dm.dc->debug.sanity_checks = true; 1322 break; 1323 case IP_VERSION(3, 1, 4): 1324 if (adev->dm.dmcub_fw_version && 1325 adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) && 1326 adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16)) 1327 adev->dm.dc->debug.sanity_checks = true; 1328 break; 1329 default: 1330 break; 1331 } 1332 1333 return 0; 1334 } 1335 1336 static void dm_dmub_hw_resume(struct amdgpu_device *adev) 1337 { 1338 struct dmub_srv *dmub_srv = adev->dm.dmub_srv; 1339 enum dmub_status status; 1340 bool init; 1341 int r; 1342 1343 if (!dmub_srv) { 1344 /* DMUB isn't supported on the ASIC. */ 1345 return; 1346 } 1347 1348 status = dmub_srv_is_hw_init(dmub_srv, &init); 1349 if (status != DMUB_STATUS_OK) 1350 DRM_WARN("DMUB hardware init check failed: %d\n", status); 1351 1352 if (status == DMUB_STATUS_OK && init) { 1353 /* Wait for firmware load to finish. */ 1354 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); 1355 if (status != DMUB_STATUS_OK) 1356 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); 1357 } else { 1358 /* Perform the full hardware initialization. */ 1359 r = dm_dmub_hw_init(adev); 1360 if (r) 1361 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 1362 } 1363 } 1364 1365 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) 1366 { 1367 u64 pt_base; 1368 u32 logical_addr_low; 1369 u32 logical_addr_high; 1370 u32 agp_base, agp_bot, agp_top; 1371 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; 1372 1373 memset(pa_config, 0, sizeof(*pa_config)); 1374 1375 agp_base = 0; 1376 agp_bot = adev->gmc.agp_start >> 24; 1377 agp_top = adev->gmc.agp_end >> 24; 1378 1379 /* AGP aperture is disabled */ 1380 if (agp_bot > agp_top) { 1381 logical_addr_low = adev->gmc.fb_start >> 18; 1382 if (adev->apu_flags & (AMD_APU_IS_RAVEN2 | 1383 AMD_APU_IS_RENOIR | 1384 AMD_APU_IS_GREEN_SARDINE)) 1385 /* 1386 * Raven2 has a HW issue that it is unable to use the vram which 1387 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the 1388 * workaround that increase system aperture high address (add 1) 1389 * to get rid of the VM fault and hardware hang. 1390 */ 1391 logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1; 1392 else 1393 logical_addr_high = adev->gmc.fb_end >> 18; 1394 } else { 1395 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; 1396 if (adev->apu_flags & (AMD_APU_IS_RAVEN2 | 1397 AMD_APU_IS_RENOIR | 1398 AMD_APU_IS_GREEN_SARDINE)) 1399 /* 1400 * Raven2 has a HW issue that it is unable to use the vram which 1401 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the 1402 * workaround that increase system aperture high address (add 1) 1403 * to get rid of the VM fault and hardware hang. 1404 */ 1405 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); 1406 else 1407 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18; 1408 } 1409 1410 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); 1411 1412 page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >> 1413 AMDGPU_GPU_PAGE_SHIFT); 1414 page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >> 1415 AMDGPU_GPU_PAGE_SHIFT); 1416 page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >> 1417 AMDGPU_GPU_PAGE_SHIFT); 1418 page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >> 1419 AMDGPU_GPU_PAGE_SHIFT); 1420 page_table_base.high_part = upper_32_bits(pt_base); 1421 page_table_base.low_part = lower_32_bits(pt_base); 1422 1423 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18; 1424 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18; 1425 1426 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24; 1427 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24; 1428 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24; 1429 1430 pa_config->system_aperture.fb_base = adev->gmc.fb_start; 1431 pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset; 1432 pa_config->system_aperture.fb_top = adev->gmc.fb_end; 1433 1434 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12; 1435 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12; 1436 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part; 1437 1438 pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support; 1439 1440 } 1441 1442 static void force_connector_state( 1443 struct amdgpu_dm_connector *aconnector, 1444 enum drm_connector_force force_state) 1445 { 1446 struct drm_connector *connector = &aconnector->base; 1447 1448 mutex_lock(&connector->dev->mode_config.mutex); 1449 aconnector->base.force = force_state; 1450 mutex_unlock(&connector->dev->mode_config.mutex); 1451 1452 mutex_lock(&aconnector->hpd_lock); 1453 drm_kms_helper_connector_hotplug_event(connector); 1454 mutex_unlock(&aconnector->hpd_lock); 1455 } 1456 1457 static void dm_handle_hpd_rx_offload_work(struct work_struct *work) 1458 { 1459 struct hpd_rx_irq_offload_work *offload_work; 1460 struct amdgpu_dm_connector *aconnector; 1461 struct dc_link *dc_link; 1462 struct amdgpu_device *adev; 1463 enum dc_connection_type new_connection_type = dc_connection_none; 1464 unsigned long flags; 1465 union test_response test_response; 1466 1467 memset(&test_response, 0, sizeof(test_response)); 1468 1469 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work); 1470 aconnector = offload_work->offload_wq->aconnector; 1471 1472 if (!aconnector) { 1473 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work"); 1474 goto skip; 1475 } 1476 1477 adev = drm_to_adev(aconnector->base.dev); 1478 dc_link = aconnector->dc_link; 1479 1480 mutex_lock(&aconnector->hpd_lock); 1481 if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) 1482 DRM_ERROR("KMS: Failed to detect connector\n"); 1483 mutex_unlock(&aconnector->hpd_lock); 1484 1485 if (new_connection_type == dc_connection_none) 1486 goto skip; 1487 1488 if (amdgpu_in_reset(adev)) 1489 goto skip; 1490 1491 if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || 1492 offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { 1493 dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT); 1494 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); 1495 offload_work->offload_wq->is_handling_mst_msg_rdy_event = false; 1496 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); 1497 goto skip; 1498 } 1499 1500 mutex_lock(&adev->dm.dc_lock); 1501 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) { 1502 dc_link_dp_handle_automated_test(dc_link); 1503 1504 if (aconnector->timing_changed) { 1505 /* force connector disconnect and reconnect */ 1506 force_connector_state(aconnector, DRM_FORCE_OFF); 1507 msleep(100); 1508 force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED); 1509 } 1510 1511 test_response.bits.ACK = 1; 1512 1513 core_link_write_dpcd( 1514 dc_link, 1515 DP_TEST_RESPONSE, 1516 &test_response.raw, 1517 sizeof(test_response)); 1518 } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && 1519 dc_link_check_link_loss_status(dc_link, &offload_work->data) && 1520 dc_link_dp_allow_hpd_rx_irq(dc_link)) { 1521 /* offload_work->data is from handle_hpd_rx_irq-> 1522 * schedule_hpd_rx_offload_work.this is defer handle 1523 * for hpd short pulse. upon here, link status may be 1524 * changed, need get latest link status from dpcd 1525 * registers. if link status is good, skip run link 1526 * training again. 1527 */ 1528 union hpd_irq_data irq_data; 1529 1530 memset(&irq_data, 0, sizeof(irq_data)); 1531 1532 /* before dc_link_dp_handle_link_loss, allow new link lost handle 1533 * request be added to work queue if link lost at end of dc_link_ 1534 * dp_handle_link_loss 1535 */ 1536 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); 1537 offload_work->offload_wq->is_handling_link_loss = false; 1538 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); 1539 1540 if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) && 1541 dc_link_check_link_loss_status(dc_link, &irq_data)) 1542 dc_link_dp_handle_link_loss(dc_link); 1543 } 1544 mutex_unlock(&adev->dm.dc_lock); 1545 1546 skip: 1547 kfree(offload_work); 1548 1549 } 1550 1551 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc) 1552 { 1553 int max_caps = dc->caps.max_links; 1554 int i = 0; 1555 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL; 1556 1557 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL); 1558 1559 if (!hpd_rx_offload_wq) 1560 return NULL; 1561 1562 1563 for (i = 0; i < max_caps; i++) { 1564 hpd_rx_offload_wq[i].wq = 1565 create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq"); 1566 1567 if (hpd_rx_offload_wq[i].wq == NULL) { 1568 DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!"); 1569 goto out_err; 1570 } 1571 1572 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock); 1573 } 1574 1575 return hpd_rx_offload_wq; 1576 1577 out_err: 1578 for (i = 0; i < max_caps; i++) { 1579 if (hpd_rx_offload_wq[i].wq) 1580 destroy_workqueue(hpd_rx_offload_wq[i].wq); 1581 } 1582 kfree(hpd_rx_offload_wq); 1583 return NULL; 1584 } 1585 1586 struct amdgpu_stutter_quirk { 1587 u16 chip_vendor; 1588 u16 chip_device; 1589 u16 subsys_vendor; 1590 u16 subsys_device; 1591 u8 revision; 1592 }; 1593 1594 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = { 1595 /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */ 1596 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, 1597 { 0, 0, 0, 0, 0 }, 1598 }; 1599 1600 static bool dm_should_disable_stutter(struct pci_dev *pdev) 1601 { 1602 const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list; 1603 1604 while (p && p->chip_device != 0) { 1605 if (pdev->vendor == p->chip_vendor && 1606 pdev->device == p->chip_device && 1607 pdev->subsystem_vendor == p->subsys_vendor && 1608 pdev->subsystem_device == p->subsys_device && 1609 pdev->revision == p->revision) { 1610 return true; 1611 } 1612 ++p; 1613 } 1614 return false; 1615 } 1616 1617 static const struct dmi_system_id hpd_disconnect_quirk_table[] = { 1618 { 1619 .matches = { 1620 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1621 DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), 1622 }, 1623 }, 1624 { 1625 .matches = { 1626 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1627 DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), 1628 }, 1629 }, 1630 { 1631 .matches = { 1632 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1633 DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), 1634 }, 1635 }, 1636 { 1637 .matches = { 1638 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1639 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), 1640 }, 1641 }, 1642 { 1643 .matches = { 1644 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1645 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), 1646 }, 1647 }, 1648 { 1649 .matches = { 1650 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1651 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), 1652 }, 1653 }, 1654 { 1655 .matches = { 1656 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1657 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), 1658 }, 1659 }, 1660 { 1661 .matches = { 1662 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1663 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), 1664 }, 1665 }, 1666 { 1667 .matches = { 1668 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1669 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), 1670 }, 1671 }, 1672 {} 1673 /* TODO: refactor this from a fixed table to a dynamic option */ 1674 }; 1675 1676 static void retrieve_dmi_info(struct amdgpu_display_manager *dm) 1677 { 1678 const struct dmi_system_id *dmi_id; 1679 1680 dm->aux_hpd_discon_quirk = false; 1681 1682 dmi_id = dmi_first_match(hpd_disconnect_quirk_table); 1683 if (dmi_id) { 1684 dm->aux_hpd_discon_quirk = true; 1685 DRM_INFO("aux_hpd_discon_quirk attached\n"); 1686 } 1687 } 1688 1689 void* 1690 dm_allocate_gpu_mem( 1691 struct amdgpu_device *adev, 1692 enum dc_gpu_mem_alloc_type type, 1693 size_t size, 1694 long long *addr) 1695 { 1696 struct dal_allocation *da; 1697 u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ? 1698 AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM; 1699 int ret; 1700 1701 da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL); 1702 if (!da) 1703 return NULL; 1704 1705 ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, 1706 domain, &da->bo, 1707 &da->gpu_addr, &da->cpu_ptr); 1708 1709 *addr = da->gpu_addr; 1710 1711 if (ret) { 1712 kfree(da); 1713 return NULL; 1714 } 1715 1716 /* add da to list in dm */ 1717 list_add(&da->list, &adev->dm.da_list); 1718 1719 return da->cpu_ptr; 1720 } 1721 1722 void 1723 dm_free_gpu_mem( 1724 struct amdgpu_device *adev, 1725 enum dc_gpu_mem_alloc_type type, 1726 void *pvMem) 1727 { 1728 struct dal_allocation *da; 1729 1730 /* walk the da list in DM */ 1731 list_for_each_entry(da, &adev->dm.da_list, list) { 1732 if (pvMem == da->cpu_ptr) { 1733 amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); 1734 list_del(&da->list); 1735 kfree(da); 1736 break; 1737 } 1738 } 1739 1740 } 1741 1742 static enum dmub_status 1743 dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev, 1744 enum dmub_gpint_command command_code, 1745 uint16_t param, 1746 uint32_t timeout_us) 1747 { 1748 union dmub_gpint_data_register reg, test; 1749 uint32_t i; 1750 1751 /* Assume that VBIOS DMUB is ready to take commands */ 1752 1753 reg.bits.status = 1; 1754 reg.bits.command_code = command_code; 1755 reg.bits.param = param; 1756 1757 cgs_write_register(adev->dm.cgs_device, 0x34c0 + 0x01f8, reg.all); 1758 1759 for (i = 0; i < timeout_us; ++i) { 1760 udelay(1); 1761 1762 /* Check if our GPINT got acked */ 1763 reg.bits.status = 0; 1764 test = (union dmub_gpint_data_register) 1765 cgs_read_register(adev->dm.cgs_device, 0x34c0 + 0x01f8); 1766 1767 if (test.all == reg.all) 1768 return DMUB_STATUS_OK; 1769 } 1770 1771 return DMUB_STATUS_TIMEOUT; 1772 } 1773 1774 static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev) 1775 { 1776 struct dml2_soc_bb *bb; 1777 long long addr; 1778 int i = 0; 1779 uint16_t chunk; 1780 enum dmub_gpint_command send_addrs[] = { 1781 DMUB_GPINT__SET_BB_ADDR_WORD0, 1782 DMUB_GPINT__SET_BB_ADDR_WORD1, 1783 DMUB_GPINT__SET_BB_ADDR_WORD2, 1784 DMUB_GPINT__SET_BB_ADDR_WORD3, 1785 }; 1786 enum dmub_status ret; 1787 1788 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1789 case IP_VERSION(4, 0, 1): 1790 break; 1791 default: 1792 return NULL; 1793 } 1794 1795 bb = dm_allocate_gpu_mem(adev, 1796 DC_MEM_ALLOC_TYPE_GART, 1797 sizeof(struct dml2_soc_bb), 1798 &addr); 1799 if (!bb) 1800 return NULL; 1801 1802 for (i = 0; i < 4; i++) { 1803 /* Extract 16-bit chunk */ 1804 chunk = ((uint64_t) addr >> (i * 16)) & 0xFFFF; 1805 /* Send the chunk */ 1806 ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000); 1807 if (ret != DMUB_STATUS_OK) 1808 goto free_bb; 1809 } 1810 1811 /* Now ask DMUB to copy the bb */ 1812 ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000); 1813 if (ret != DMUB_STATUS_OK) 1814 goto free_bb; 1815 1816 return bb; 1817 1818 free_bb: 1819 dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb); 1820 return NULL; 1821 1822 } 1823 1824 static enum dmub_ips_disable_type dm_get_default_ips_mode( 1825 struct amdgpu_device *adev) 1826 { 1827 enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE; 1828 1829 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1830 case IP_VERSION(3, 5, 0): 1831 /* 1832 * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to 1833 * cause a hard hang. A fix exists for newer PMFW. 1834 * 1835 * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest 1836 * IPS state in all cases, except for s0ix and all displays off (DPMS), 1837 * where IPS2 is allowed. 1838 * 1839 * When checking pmfw version, use the major and minor only. 1840 */ 1841 if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300) 1842 ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; 1843 else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0)) 1844 /* 1845 * Other ASICs with DCN35 that have residency issues with 1846 * IPS2 in idle. 1847 * We want them to use IPS2 only in display off cases. 1848 */ 1849 ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; 1850 break; 1851 case IP_VERSION(3, 5, 1): 1852 ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; 1853 break; 1854 default: 1855 /* ASICs older than DCN35 do not have IPSs */ 1856 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0)) 1857 ret = DMUB_IPS_DISABLE_ALL; 1858 break; 1859 } 1860 1861 return ret; 1862 } 1863 1864 static int amdgpu_dm_init(struct amdgpu_device *adev) 1865 { 1866 struct dc_init_data init_data; 1867 struct dc_callback_init init_params; 1868 int r; 1869 1870 adev->dm.ddev = adev_to_drm(adev); 1871 adev->dm.adev = adev; 1872 1873 /* Zero all the fields */ 1874 memset(&init_data, 0, sizeof(init_data)); 1875 memset(&init_params, 0, sizeof(init_params)); 1876 1877 mutex_init(&adev->dm.dpia_aux_lock); 1878 mutex_init(&adev->dm.dc_lock); 1879 mutex_init(&adev->dm.audio_lock); 1880 1881 if (amdgpu_dm_irq_init(adev)) { 1882 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); 1883 goto error; 1884 } 1885 1886 init_data.asic_id.chip_family = adev->family; 1887 1888 init_data.asic_id.pci_revision_id = adev->pdev->revision; 1889 init_data.asic_id.hw_internal_rev = adev->external_rev_id; 1890 init_data.asic_id.chip_id = adev->pdev->device; 1891 1892 init_data.asic_id.vram_width = adev->gmc.vram_width; 1893 /* TODO: initialize init_data.asic_id.vram_type here!!!! */ 1894 init_data.asic_id.atombios_base_address = 1895 adev->mode_info.atom_context->bios; 1896 1897 init_data.driver = adev; 1898 1899 /* cgs_device was created in dm_sw_init() */ 1900 init_data.cgs_device = adev->dm.cgs_device; 1901 1902 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; 1903 1904 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1905 case IP_VERSION(2, 1, 0): 1906 switch (adev->dm.dmcub_fw_version) { 1907 case 0: /* development */ 1908 case 0x1: /* linux-firmware.git hash 6d9f399 */ 1909 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ 1910 init_data.flags.disable_dmcu = false; 1911 break; 1912 default: 1913 init_data.flags.disable_dmcu = true; 1914 } 1915 break; 1916 case IP_VERSION(2, 0, 3): 1917 init_data.flags.disable_dmcu = true; 1918 break; 1919 default: 1920 break; 1921 } 1922 1923 /* APU support S/G display by default except: 1924 * ASICs before Carrizo, 1925 * RAVEN1 (Users reported stability issue) 1926 */ 1927 1928 if (adev->asic_type < CHIP_CARRIZO) { 1929 init_data.flags.gpu_vm_support = false; 1930 } else if (adev->asic_type == CHIP_RAVEN) { 1931 if (adev->apu_flags & AMD_APU_IS_RAVEN) 1932 init_data.flags.gpu_vm_support = false; 1933 else 1934 init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0); 1935 } else { 1936 if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3)) 1937 init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1); 1938 else 1939 init_data.flags.gpu_vm_support = 1940 (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); 1941 } 1942 1943 adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support; 1944 1945 if (amdgpu_dc_feature_mask & DC_FBC_MASK) 1946 init_data.flags.fbc_support = true; 1947 1948 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK) 1949 init_data.flags.multi_mon_pp_mclk_switch = true; 1950 1951 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK) 1952 init_data.flags.disable_fractional_pwm = true; 1953 1954 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING) 1955 init_data.flags.edp_no_power_sequencing = true; 1956 1957 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A) 1958 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true; 1959 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0) 1960 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; 1961 1962 init_data.flags.seamless_boot_edp_requested = false; 1963 1964 if (amdgpu_device_seamless_boot_supported(adev)) { 1965 init_data.flags.seamless_boot_edp_requested = true; 1966 init_data.flags.allow_seamless_boot_optimization = true; 1967 DRM_INFO("Seamless boot condition check passed\n"); 1968 } 1969 1970 init_data.flags.enable_mipi_converter_optimization = true; 1971 1972 init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0]; 1973 init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0]; 1974 init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0]; 1975 1976 if (amdgpu_dc_debug_mask & DC_DISABLE_IPS) 1977 init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL; 1978 else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC) 1979 init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC; 1980 else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC) 1981 init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; 1982 else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE) 1983 init_data.flags.disable_ips = DMUB_IPS_ENABLE; 1984 else 1985 init_data.flags.disable_ips = dm_get_default_ips_mode(adev); 1986 1987 init_data.flags.disable_ips_in_vpb = 0; 1988 1989 /* Enable DWB for tested platforms only */ 1990 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) 1991 init_data.num_virtual_links = 1; 1992 1993 retrieve_dmi_info(&adev->dm); 1994 1995 if (adev->dm.bb_from_dmub) 1996 init_data.bb_from_dmub = adev->dm.bb_from_dmub; 1997 else 1998 init_data.bb_from_dmub = NULL; 1999 2000 /* Display Core create. */ 2001 adev->dm.dc = dc_create(&init_data); 2002 2003 if (adev->dm.dc) { 2004 DRM_INFO("Display Core v%s initialized on %s\n", DC_VER, 2005 dce_version_to_string(adev->dm.dc->ctx->dce_version)); 2006 } else { 2007 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER); 2008 goto error; 2009 } 2010 2011 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) { 2012 adev->dm.dc->debug.force_single_disp_pipe_split = false; 2013 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; 2014 } 2015 2016 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) 2017 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; 2018 if (dm_should_disable_stutter(adev->pdev)) 2019 adev->dm.dc->debug.disable_stutter = true; 2020 2021 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) 2022 adev->dm.dc->debug.disable_stutter = true; 2023 2024 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) 2025 adev->dm.dc->debug.disable_dsc = true; 2026 2027 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) 2028 adev->dm.dc->debug.disable_clock_gate = true; 2029 2030 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH) 2031 adev->dm.dc->debug.force_subvp_mclk_switch = true; 2032 2033 if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) { 2034 adev->dm.dc->debug.using_dml2 = true; 2035 adev->dm.dc->debug.using_dml21 = true; 2036 } 2037 2038 adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; 2039 2040 /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ 2041 adev->dm.dc->debug.ignore_cable_id = true; 2042 2043 if (adev->dm.dc->caps.dp_hdmi21_pcon_support) 2044 DRM_INFO("DP-HDMI FRL PCON supported\n"); 2045 2046 r = dm_dmub_hw_init(adev); 2047 if (r) { 2048 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 2049 goto error; 2050 } 2051 2052 dc_hardware_init(adev->dm.dc); 2053 2054 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); 2055 if (!adev->dm.hpd_rx_offload_wq) { 2056 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); 2057 goto error; 2058 } 2059 2060 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { 2061 struct dc_phy_addr_space_config pa_config; 2062 2063 mmhub_read_system_context(adev, &pa_config); 2064 2065 // Call the DC init_memory func 2066 dc_setup_system_context(adev->dm.dc, &pa_config); 2067 } 2068 2069 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); 2070 if (!adev->dm.freesync_module) { 2071 DRM_ERROR( 2072 "amdgpu: failed to initialize freesync_module.\n"); 2073 } else 2074 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", 2075 adev->dm.freesync_module); 2076 2077 amdgpu_dm_init_color_mod(); 2078 2079 if (adev->dm.dc->caps.max_links > 0) { 2080 adev->dm.vblank_control_workqueue = 2081 create_singlethread_workqueue("dm_vblank_control_workqueue"); 2082 if (!adev->dm.vblank_control_workqueue) 2083 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); 2084 } 2085 2086 if (adev->dm.dc->caps.ips_support && 2087 adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) 2088 adev->dm.idle_workqueue = idle_create_workqueue(adev); 2089 2090 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { 2091 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); 2092 2093 if (!adev->dm.hdcp_workqueue) 2094 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); 2095 else 2096 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); 2097 2098 dc_init_callbacks(adev->dm.dc, &init_params); 2099 } 2100 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 2101 init_completion(&adev->dm.dmub_aux_transfer_done); 2102 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); 2103 if (!adev->dm.dmub_notify) { 2104 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); 2105 goto error; 2106 } 2107 2108 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); 2109 if (!adev->dm.delayed_hpd_wq) { 2110 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); 2111 goto error; 2112 } 2113 2114 amdgpu_dm_outbox_init(adev); 2115 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, 2116 dmub_aux_setconfig_callback, false)) { 2117 DRM_ERROR("amdgpu: fail to register dmub aux callback"); 2118 goto error; 2119 } 2120 /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. 2121 * It is expected that DMUB will resend any pending notifications at this point. Note 2122 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to 2123 * align legacy interface initialization sequence. Connection status will be proactivly 2124 * detected once in the amdgpu_dm_initialize_drm_device. 2125 */ 2126 dc_enable_dmub_outbox(adev->dm.dc); 2127 2128 /* DPIA trace goes to dmesg logs only if outbox is enabled */ 2129 if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE) 2130 dc_dmub_srv_enable_dpia_trace(adev->dm.dc); 2131 } 2132 2133 if (amdgpu_dm_initialize_drm_device(adev)) { 2134 DRM_ERROR( 2135 "amdgpu: failed to initialize sw for display support.\n"); 2136 goto error; 2137 } 2138 2139 /* create fake encoders for MST */ 2140 dm_dp_create_fake_mst_encoders(adev); 2141 2142 /* TODO: Add_display_info? */ 2143 2144 /* TODO use dynamic cursor width */ 2145 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; 2146 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; 2147 2148 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { 2149 DRM_ERROR( 2150 "amdgpu: failed to initialize sw for display support.\n"); 2151 goto error; 2152 } 2153 2154 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 2155 amdgpu_dm_crtc_secure_display_create_contexts(adev); 2156 if (!adev->dm.secure_display_ctx.crtc_ctx) 2157 DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n"); 2158 #endif 2159 2160 DRM_DEBUG_DRIVER("KMS initialized.\n"); 2161 2162 return 0; 2163 error: 2164 amdgpu_dm_fini(adev); 2165 2166 return -EINVAL; 2167 } 2168 2169 static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block) 2170 { 2171 struct amdgpu_device *adev = ip_block->adev; 2172 2173 amdgpu_dm_audio_fini(adev); 2174 2175 return 0; 2176 } 2177 2178 static void amdgpu_dm_fini(struct amdgpu_device *adev) 2179 { 2180 int i; 2181 2182 if (adev->dm.vblank_control_workqueue) { 2183 destroy_workqueue(adev->dm.vblank_control_workqueue); 2184 adev->dm.vblank_control_workqueue = NULL; 2185 } 2186 2187 if (adev->dm.idle_workqueue) { 2188 if (adev->dm.idle_workqueue->running) { 2189 adev->dm.idle_workqueue->enable = false; 2190 flush_work(&adev->dm.idle_workqueue->work); 2191 } 2192 2193 kfree(adev->dm.idle_workqueue); 2194 adev->dm.idle_workqueue = NULL; 2195 } 2196 2197 amdgpu_dm_destroy_drm_device(&adev->dm); 2198 2199 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 2200 if (adev->dm.secure_display_ctx.crtc_ctx) { 2201 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2202 if (adev->dm.secure_display_ctx.crtc_ctx[i].crtc) { 2203 flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].notify_ta_work); 2204 flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].forward_roi_work); 2205 } 2206 } 2207 kfree(adev->dm.secure_display_ctx.crtc_ctx); 2208 adev->dm.secure_display_ctx.crtc_ctx = NULL; 2209 } 2210 #endif 2211 if (adev->dm.hdcp_workqueue) { 2212 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue); 2213 adev->dm.hdcp_workqueue = NULL; 2214 } 2215 2216 if (adev->dm.dc) { 2217 dc_deinit_callbacks(adev->dm.dc); 2218 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); 2219 if (dc_enable_dmub_notifications(adev->dm.dc)) { 2220 kfree(adev->dm.dmub_notify); 2221 adev->dm.dmub_notify = NULL; 2222 destroy_workqueue(adev->dm.delayed_hpd_wq); 2223 adev->dm.delayed_hpd_wq = NULL; 2224 } 2225 } 2226 2227 if (adev->dm.dmub_bo) 2228 amdgpu_bo_free_kernel(&adev->dm.dmub_bo, 2229 &adev->dm.dmub_bo_gpu_addr, 2230 &adev->dm.dmub_bo_cpu_addr); 2231 2232 if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) { 2233 for (i = 0; i < adev->dm.dc->caps.max_links; i++) { 2234 if (adev->dm.hpd_rx_offload_wq[i].wq) { 2235 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); 2236 adev->dm.hpd_rx_offload_wq[i].wq = NULL; 2237 } 2238 } 2239 2240 kfree(adev->dm.hpd_rx_offload_wq); 2241 adev->dm.hpd_rx_offload_wq = NULL; 2242 } 2243 2244 /* DC Destroy TODO: Replace destroy DAL */ 2245 if (adev->dm.dc) 2246 dc_destroy(&adev->dm.dc); 2247 /* 2248 * TODO: pageflip, vlank interrupt 2249 * 2250 * amdgpu_dm_irq_fini(adev); 2251 */ 2252 2253 if (adev->dm.cgs_device) { 2254 amdgpu_cgs_destroy_device(adev->dm.cgs_device); 2255 adev->dm.cgs_device = NULL; 2256 } 2257 if (adev->dm.freesync_module) { 2258 mod_freesync_destroy(adev->dm.freesync_module); 2259 adev->dm.freesync_module = NULL; 2260 } 2261 2262 mutex_destroy(&adev->dm.audio_lock); 2263 mutex_destroy(&adev->dm.dc_lock); 2264 mutex_destroy(&adev->dm.dpia_aux_lock); 2265 } 2266 2267 static int load_dmcu_fw(struct amdgpu_device *adev) 2268 { 2269 const char *fw_name_dmcu = NULL; 2270 int r; 2271 const struct dmcu_firmware_header_v1_0 *hdr; 2272 2273 switch (adev->asic_type) { 2274 #if defined(CONFIG_DRM_AMD_DC_SI) 2275 case CHIP_TAHITI: 2276 case CHIP_PITCAIRN: 2277 case CHIP_VERDE: 2278 case CHIP_OLAND: 2279 #endif 2280 case CHIP_BONAIRE: 2281 case CHIP_HAWAII: 2282 case CHIP_KAVERI: 2283 case CHIP_KABINI: 2284 case CHIP_MULLINS: 2285 case CHIP_TONGA: 2286 case CHIP_FIJI: 2287 case CHIP_CARRIZO: 2288 case CHIP_STONEY: 2289 case CHIP_POLARIS11: 2290 case CHIP_POLARIS10: 2291 case CHIP_POLARIS12: 2292 case CHIP_VEGAM: 2293 case CHIP_VEGA10: 2294 case CHIP_VEGA12: 2295 case CHIP_VEGA20: 2296 return 0; 2297 case CHIP_NAVI12: 2298 fw_name_dmcu = FIRMWARE_NAVI12_DMCU; 2299 break; 2300 case CHIP_RAVEN: 2301 if (ASICREV_IS_PICASSO(adev->external_rev_id)) 2302 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 2303 else if (ASICREV_IS_RAVEN2(adev->external_rev_id)) 2304 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 2305 else 2306 return 0; 2307 break; 2308 default: 2309 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2310 case IP_VERSION(2, 0, 2): 2311 case IP_VERSION(2, 0, 3): 2312 case IP_VERSION(2, 0, 0): 2313 case IP_VERSION(2, 1, 0): 2314 case IP_VERSION(3, 0, 0): 2315 case IP_VERSION(3, 0, 2): 2316 case IP_VERSION(3, 0, 3): 2317 case IP_VERSION(3, 0, 1): 2318 case IP_VERSION(3, 1, 2): 2319 case IP_VERSION(3, 1, 3): 2320 case IP_VERSION(3, 1, 4): 2321 case IP_VERSION(3, 1, 5): 2322 case IP_VERSION(3, 1, 6): 2323 case IP_VERSION(3, 2, 0): 2324 case IP_VERSION(3, 2, 1): 2325 case IP_VERSION(3, 5, 0): 2326 case IP_VERSION(3, 5, 1): 2327 case IP_VERSION(4, 0, 1): 2328 return 0; 2329 default: 2330 break; 2331 } 2332 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 2333 return -EINVAL; 2334 } 2335 2336 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 2337 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n"); 2338 return 0; 2339 } 2340 2341 r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, AMDGPU_UCODE_REQUIRED, 2342 "%s", fw_name_dmcu); 2343 if (r == -ENODEV) { 2344 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ 2345 DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); 2346 adev->dm.fw_dmcu = NULL; 2347 return 0; 2348 } 2349 if (r) { 2350 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", 2351 fw_name_dmcu); 2352 amdgpu_ucode_release(&adev->dm.fw_dmcu); 2353 return r; 2354 } 2355 2356 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; 2357 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; 2358 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; 2359 adev->firmware.fw_size += 2360 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 2361 2362 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; 2363 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; 2364 adev->firmware.fw_size += 2365 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 2366 2367 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version); 2368 2369 DRM_DEBUG_KMS("PSP loading DMCU firmware\n"); 2370 2371 return 0; 2372 } 2373 2374 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) 2375 { 2376 struct amdgpu_device *adev = ctx; 2377 2378 return dm_read_reg(adev->dm.dc->ctx, address); 2379 } 2380 2381 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, 2382 uint32_t value) 2383 { 2384 struct amdgpu_device *adev = ctx; 2385 2386 return dm_write_reg(adev->dm.dc->ctx, address, value); 2387 } 2388 2389 static int dm_dmub_sw_init(struct amdgpu_device *adev) 2390 { 2391 struct dmub_srv_create_params create_params; 2392 struct dmub_srv_region_params region_params; 2393 struct dmub_srv_region_info region_info; 2394 struct dmub_srv_memory_params memory_params; 2395 struct dmub_srv_fb_info *fb_info; 2396 struct dmub_srv *dmub_srv; 2397 const struct dmcub_firmware_header_v1_0 *hdr; 2398 enum dmub_asic dmub_asic; 2399 enum dmub_status status; 2400 static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = { 2401 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST 2402 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK 2403 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA 2404 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS 2405 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX 2406 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF 2407 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE 2408 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM 2409 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE 2410 }; 2411 int r; 2412 2413 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2414 case IP_VERSION(2, 1, 0): 2415 dmub_asic = DMUB_ASIC_DCN21; 2416 break; 2417 case IP_VERSION(3, 0, 0): 2418 dmub_asic = DMUB_ASIC_DCN30; 2419 break; 2420 case IP_VERSION(3, 0, 1): 2421 dmub_asic = DMUB_ASIC_DCN301; 2422 break; 2423 case IP_VERSION(3, 0, 2): 2424 dmub_asic = DMUB_ASIC_DCN302; 2425 break; 2426 case IP_VERSION(3, 0, 3): 2427 dmub_asic = DMUB_ASIC_DCN303; 2428 break; 2429 case IP_VERSION(3, 1, 2): 2430 case IP_VERSION(3, 1, 3): 2431 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; 2432 break; 2433 case IP_VERSION(3, 1, 4): 2434 dmub_asic = DMUB_ASIC_DCN314; 2435 break; 2436 case IP_VERSION(3, 1, 5): 2437 dmub_asic = DMUB_ASIC_DCN315; 2438 break; 2439 case IP_VERSION(3, 1, 6): 2440 dmub_asic = DMUB_ASIC_DCN316; 2441 break; 2442 case IP_VERSION(3, 2, 0): 2443 dmub_asic = DMUB_ASIC_DCN32; 2444 break; 2445 case IP_VERSION(3, 2, 1): 2446 dmub_asic = DMUB_ASIC_DCN321; 2447 break; 2448 case IP_VERSION(3, 5, 0): 2449 case IP_VERSION(3, 5, 1): 2450 dmub_asic = DMUB_ASIC_DCN35; 2451 break; 2452 case IP_VERSION(4, 0, 1): 2453 dmub_asic = DMUB_ASIC_DCN401; 2454 break; 2455 2456 default: 2457 /* ASIC doesn't support DMUB. */ 2458 return 0; 2459 } 2460 2461 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; 2462 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); 2463 2464 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 2465 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = 2466 AMDGPU_UCODE_ID_DMCUB; 2467 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = 2468 adev->dm.dmub_fw; 2469 adev->firmware.fw_size += 2470 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); 2471 2472 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", 2473 adev->dm.dmcub_fw_version); 2474 } 2475 2476 2477 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); 2478 dmub_srv = adev->dm.dmub_srv; 2479 2480 if (!dmub_srv) { 2481 DRM_ERROR("Failed to allocate DMUB service!\n"); 2482 return -ENOMEM; 2483 } 2484 2485 memset(&create_params, 0, sizeof(create_params)); 2486 create_params.user_ctx = adev; 2487 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; 2488 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; 2489 create_params.asic = dmub_asic; 2490 2491 /* Create the DMUB service. */ 2492 status = dmub_srv_create(dmub_srv, &create_params); 2493 if (status != DMUB_STATUS_OK) { 2494 DRM_ERROR("Error creating DMUB service: %d\n", status); 2495 return -EINVAL; 2496 } 2497 2498 /* Calculate the size of all the regions for the DMUB service. */ 2499 memset(®ion_params, 0, sizeof(region_params)); 2500 2501 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - 2502 PSP_HEADER_BYTES - PSP_FOOTER_BYTES; 2503 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); 2504 region_params.vbios_size = adev->bios_size; 2505 region_params.fw_bss_data = region_params.bss_data_size ? 2506 adev->dm.dmub_fw->data + 2507 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2508 le32_to_cpu(hdr->inst_const_bytes) : NULL; 2509 region_params.fw_inst_const = 2510 adev->dm.dmub_fw->data + 2511 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2512 PSP_HEADER_BYTES; 2513 region_params.window_memory_type = window_memory_type; 2514 2515 status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, 2516 ®ion_info); 2517 2518 if (status != DMUB_STATUS_OK) { 2519 DRM_ERROR("Error calculating DMUB region info: %d\n", status); 2520 return -EINVAL; 2521 } 2522 2523 /* 2524 * Allocate a framebuffer based on the total size of all the regions. 2525 * TODO: Move this into GART. 2526 */ 2527 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, 2528 AMDGPU_GEM_DOMAIN_VRAM | 2529 AMDGPU_GEM_DOMAIN_GTT, 2530 &adev->dm.dmub_bo, 2531 &adev->dm.dmub_bo_gpu_addr, 2532 &adev->dm.dmub_bo_cpu_addr); 2533 if (r) 2534 return r; 2535 2536 /* Rebase the regions on the framebuffer address. */ 2537 memset(&memory_params, 0, sizeof(memory_params)); 2538 memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr; 2539 memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr; 2540 memory_params.region_info = ®ion_info; 2541 memory_params.window_memory_type = window_memory_type; 2542 2543 adev->dm.dmub_fb_info = 2544 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); 2545 fb_info = adev->dm.dmub_fb_info; 2546 2547 if (!fb_info) { 2548 DRM_ERROR( 2549 "Failed to allocate framebuffer info for DMUB service!\n"); 2550 return -ENOMEM; 2551 } 2552 2553 status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info); 2554 if (status != DMUB_STATUS_OK) { 2555 DRM_ERROR("Error calculating DMUB FB info: %d\n", status); 2556 return -EINVAL; 2557 } 2558 2559 adev->dm.bb_from_dmub = dm_dmub_get_vbios_bounding_box(adev); 2560 2561 return 0; 2562 } 2563 2564 static int dm_sw_init(struct amdgpu_ip_block *ip_block) 2565 { 2566 struct amdgpu_device *adev = ip_block->adev; 2567 int r; 2568 2569 adev->dm.cgs_device = amdgpu_cgs_create_device(adev); 2570 2571 if (!adev->dm.cgs_device) { 2572 DRM_ERROR("amdgpu: failed to create cgs device.\n"); 2573 return -EINVAL; 2574 } 2575 2576 /* Moved from dm init since we need to use allocations for storing bounding box data */ 2577 INIT_LIST_HEAD(&adev->dm.da_list); 2578 2579 r = dm_dmub_sw_init(adev); 2580 if (r) 2581 return r; 2582 2583 return load_dmcu_fw(adev); 2584 } 2585 2586 static int dm_sw_fini(struct amdgpu_ip_block *ip_block) 2587 { 2588 struct amdgpu_device *adev = ip_block->adev; 2589 struct dal_allocation *da; 2590 2591 list_for_each_entry(da, &adev->dm.da_list, list) { 2592 if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) { 2593 amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); 2594 list_del(&da->list); 2595 kfree(da); 2596 adev->dm.bb_from_dmub = NULL; 2597 break; 2598 } 2599 } 2600 2601 2602 kfree(adev->dm.dmub_fb_info); 2603 adev->dm.dmub_fb_info = NULL; 2604 2605 if (adev->dm.dmub_srv) { 2606 dmub_srv_destroy(adev->dm.dmub_srv); 2607 kfree(adev->dm.dmub_srv); 2608 adev->dm.dmub_srv = NULL; 2609 } 2610 2611 amdgpu_ucode_release(&adev->dm.dmub_fw); 2612 amdgpu_ucode_release(&adev->dm.fw_dmcu); 2613 2614 return 0; 2615 } 2616 2617 static int detect_mst_link_for_all_connectors(struct drm_device *dev) 2618 { 2619 struct amdgpu_dm_connector *aconnector; 2620 struct drm_connector *connector; 2621 struct drm_connector_list_iter iter; 2622 int ret = 0; 2623 2624 drm_connector_list_iter_begin(dev, &iter); 2625 drm_for_each_connector_iter(connector, &iter) { 2626 2627 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 2628 continue; 2629 2630 aconnector = to_amdgpu_dm_connector(connector); 2631 if (aconnector->dc_link->type == dc_connection_mst_branch && 2632 aconnector->mst_mgr.aux) { 2633 drm_dbg_kms(dev, "DM_MST: starting TM on aconnector: %p [id: %d]\n", 2634 aconnector, 2635 aconnector->base.base.id); 2636 2637 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); 2638 if (ret < 0) { 2639 drm_err(dev, "DM_MST: Failed to start MST\n"); 2640 aconnector->dc_link->type = 2641 dc_connection_single; 2642 ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, 2643 aconnector->dc_link); 2644 break; 2645 } 2646 } 2647 } 2648 drm_connector_list_iter_end(&iter); 2649 2650 return ret; 2651 } 2652 2653 static int dm_late_init(struct amdgpu_ip_block *ip_block) 2654 { 2655 struct amdgpu_device *adev = ip_block->adev; 2656 2657 struct dmcu_iram_parameters params; 2658 unsigned int linear_lut[16]; 2659 int i; 2660 struct dmcu *dmcu = NULL; 2661 2662 dmcu = adev->dm.dc->res_pool->dmcu; 2663 2664 for (i = 0; i < 16; i++) 2665 linear_lut[i] = 0xFFFF * i / 15; 2666 2667 params.set = 0; 2668 params.backlight_ramping_override = false; 2669 params.backlight_ramping_start = 0xCCCC; 2670 params.backlight_ramping_reduction = 0xCCCCCCCC; 2671 params.backlight_lut_array_size = 16; 2672 params.backlight_lut_array = linear_lut; 2673 2674 /* Min backlight level after ABM reduction, Don't allow below 1% 2675 * 0xFFFF x 0.01 = 0x28F 2676 */ 2677 params.min_abm_backlight = 0x28F; 2678 /* In the case where abm is implemented on dmcub, 2679 * dmcu object will be null. 2680 * ABM 2.4 and up are implemented on dmcub. 2681 */ 2682 if (dmcu) { 2683 if (!dmcu_load_iram(dmcu, params)) 2684 return -EINVAL; 2685 } else if (adev->dm.dc->ctx->dmub_srv) { 2686 struct dc_link *edp_links[MAX_NUM_EDP]; 2687 int edp_num; 2688 2689 dc_get_edp_links(adev->dm.dc, edp_links, &edp_num); 2690 for (i = 0; i < edp_num; i++) { 2691 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i)) 2692 return -EINVAL; 2693 } 2694 } 2695 2696 return detect_mst_link_for_all_connectors(adev_to_drm(adev)); 2697 } 2698 2699 static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr) 2700 { 2701 u8 buf[UUID_SIZE]; 2702 guid_t guid; 2703 int ret; 2704 2705 mutex_lock(&mgr->lock); 2706 if (!mgr->mst_primary) 2707 goto out_fail; 2708 2709 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { 2710 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2711 goto out_fail; 2712 } 2713 2714 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2715 DP_MST_EN | 2716 DP_UP_REQ_EN | 2717 DP_UPSTREAM_IS_SRC); 2718 if (ret < 0) { 2719 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); 2720 goto out_fail; 2721 } 2722 2723 /* Some hubs forget their guids after they resume */ 2724 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf)); 2725 if (ret != sizeof(buf)) { 2726 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2727 goto out_fail; 2728 } 2729 2730 import_guid(&guid, buf); 2731 2732 if (guid_is_null(&guid)) { 2733 guid_gen(&guid); 2734 export_guid(buf, &guid); 2735 2736 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, buf, sizeof(buf)); 2737 2738 if (ret != sizeof(buf)) { 2739 drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n"); 2740 goto out_fail; 2741 } 2742 } 2743 2744 guid_copy(&mgr->mst_primary->guid, &guid); 2745 2746 out_fail: 2747 mutex_unlock(&mgr->lock); 2748 } 2749 2750 static void s3_handle_mst(struct drm_device *dev, bool suspend) 2751 { 2752 struct amdgpu_dm_connector *aconnector; 2753 struct drm_connector *connector; 2754 struct drm_connector_list_iter iter; 2755 struct drm_dp_mst_topology_mgr *mgr; 2756 2757 drm_connector_list_iter_begin(dev, &iter); 2758 drm_for_each_connector_iter(connector, &iter) { 2759 2760 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 2761 continue; 2762 2763 aconnector = to_amdgpu_dm_connector(connector); 2764 if (aconnector->dc_link->type != dc_connection_mst_branch || 2765 aconnector->mst_root) 2766 continue; 2767 2768 mgr = &aconnector->mst_mgr; 2769 2770 if (suspend) { 2771 drm_dp_mst_topology_mgr_suspend(mgr); 2772 } else { 2773 /* if extended timeout is supported in hardware, 2774 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer 2775 * CTS 4.2.1.1 regression introduced by CTS specs requirement update. 2776 */ 2777 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); 2778 if (!dp_is_lttpr_present(aconnector->dc_link)) 2779 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); 2780 2781 /* TODO: move resume_mst_branch_status() into drm mst resume again 2782 * once topology probing work is pulled out from mst resume into mst 2783 * resume 2nd step. mst resume 2nd step should be called after old 2784 * state getting restored (i.e. drm_atomic_helper_resume()). 2785 */ 2786 resume_mst_branch_status(mgr); 2787 } 2788 } 2789 drm_connector_list_iter_end(&iter); 2790 } 2791 2792 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) 2793 { 2794 int ret = 0; 2795 2796 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends 2797 * on window driver dc implementation. 2798 * For Navi1x, clock settings of dcn watermarks are fixed. the settings 2799 * should be passed to smu during boot up and resume from s3. 2800 * boot up: dc calculate dcn watermark clock settings within dc_create, 2801 * dcn20_resource_construct 2802 * then call pplib functions below to pass the settings to smu: 2803 * smu_set_watermarks_for_clock_ranges 2804 * smu_set_watermarks_table 2805 * navi10_set_watermarks_table 2806 * smu_write_watermarks_table 2807 * 2808 * For Renoir, clock settings of dcn watermark are also fixed values. 2809 * dc has implemented different flow for window driver: 2810 * dc_hardware_init / dc_set_power_state 2811 * dcn10_init_hw 2812 * notify_wm_ranges 2813 * set_wm_ranges 2814 * -- Linux 2815 * smu_set_watermarks_for_clock_ranges 2816 * renoir_set_watermarks_table 2817 * smu_write_watermarks_table 2818 * 2819 * For Linux, 2820 * dc_hardware_init -> amdgpu_dm_init 2821 * dc_set_power_state --> dm_resume 2822 * 2823 * therefore, this function apply to navi10/12/14 but not Renoir 2824 * * 2825 */ 2826 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2827 case IP_VERSION(2, 0, 2): 2828 case IP_VERSION(2, 0, 0): 2829 break; 2830 default: 2831 return 0; 2832 } 2833 2834 ret = amdgpu_dpm_write_watermarks_table(adev); 2835 if (ret) { 2836 DRM_ERROR("Failed to update WMTABLE!\n"); 2837 return ret; 2838 } 2839 2840 return 0; 2841 } 2842 2843 /** 2844 * dm_hw_init() - Initialize DC device 2845 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 2846 * 2847 * Initialize the &struct amdgpu_display_manager device. This involves calling 2848 * the initializers of each DM component, then populating the struct with them. 2849 * 2850 * Although the function implies hardware initialization, both hardware and 2851 * software are initialized here. Splitting them out to their relevant init 2852 * hooks is a future TODO item. 2853 * 2854 * Some notable things that are initialized here: 2855 * 2856 * - Display Core, both software and hardware 2857 * - DC modules that we need (freesync and color management) 2858 * - DRM software states 2859 * - Interrupt sources and handlers 2860 * - Vblank support 2861 * - Debug FS entries, if enabled 2862 */ 2863 static int dm_hw_init(struct amdgpu_ip_block *ip_block) 2864 { 2865 struct amdgpu_device *adev = ip_block->adev; 2866 int r; 2867 2868 /* Create DAL display manager */ 2869 r = amdgpu_dm_init(adev); 2870 if (r) 2871 return r; 2872 amdgpu_dm_hpd_init(adev); 2873 2874 return 0; 2875 } 2876 2877 /** 2878 * dm_hw_fini() - Teardown DC device 2879 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 2880 * 2881 * Teardown components within &struct amdgpu_display_manager that require 2882 * cleanup. This involves cleaning up the DRM device, DC, and any modules that 2883 * were loaded. Also flush IRQ workqueues and disable them. 2884 */ 2885 static int dm_hw_fini(struct amdgpu_ip_block *ip_block) 2886 { 2887 struct amdgpu_device *adev = ip_block->adev; 2888 2889 amdgpu_dm_hpd_fini(adev); 2890 2891 amdgpu_dm_irq_fini(adev); 2892 amdgpu_dm_fini(adev); 2893 return 0; 2894 } 2895 2896 2897 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, 2898 struct dc_state *state, bool enable) 2899 { 2900 enum dc_irq_source irq_source; 2901 struct amdgpu_crtc *acrtc; 2902 int rc = -EBUSY; 2903 int i = 0; 2904 2905 for (i = 0; i < state->stream_count; i++) { 2906 acrtc = get_crtc_by_otg_inst( 2907 adev, state->stream_status[i].primary_otg_inst); 2908 2909 if (acrtc && state->stream_status[i].plane_count != 0) { 2910 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; 2911 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; 2912 if (rc) 2913 DRM_WARN("Failed to %s pflip interrupts\n", 2914 enable ? "enable" : "disable"); 2915 2916 if (enable) { 2917 if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state))) 2918 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true); 2919 } else 2920 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); 2921 2922 if (rc) 2923 DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); 2924 2925 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; 2926 /* During gpu-reset we disable and then enable vblank irq, so 2927 * don't use amdgpu_irq_get/put() to avoid refcount change. 2928 */ 2929 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) 2930 DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis"); 2931 } 2932 } 2933 2934 } 2935 2936 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) 2937 { 2938 struct dc_state *context = NULL; 2939 enum dc_status res = DC_ERROR_UNEXPECTED; 2940 int i; 2941 struct dc_stream_state *del_streams[MAX_PIPES]; 2942 int del_streams_count = 0; 2943 struct dc_commit_streams_params params = {}; 2944 2945 memset(del_streams, 0, sizeof(del_streams)); 2946 2947 context = dc_state_create_current_copy(dc); 2948 if (context == NULL) 2949 goto context_alloc_fail; 2950 2951 /* First remove from context all streams */ 2952 for (i = 0; i < context->stream_count; i++) { 2953 struct dc_stream_state *stream = context->streams[i]; 2954 2955 del_streams[del_streams_count++] = stream; 2956 } 2957 2958 /* Remove all planes for removed streams and then remove the streams */ 2959 for (i = 0; i < del_streams_count; i++) { 2960 if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) { 2961 res = DC_FAIL_DETACH_SURFACES; 2962 goto fail; 2963 } 2964 2965 res = dc_state_remove_stream(dc, context, del_streams[i]); 2966 if (res != DC_OK) 2967 goto fail; 2968 } 2969 2970 params.streams = context->streams; 2971 params.stream_count = context->stream_count; 2972 res = dc_commit_streams(dc, ¶ms); 2973 2974 fail: 2975 dc_state_release(context); 2976 2977 context_alloc_fail: 2978 return res; 2979 } 2980 2981 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) 2982 { 2983 int i; 2984 2985 if (dm->hpd_rx_offload_wq) { 2986 for (i = 0; i < dm->dc->caps.max_links; i++) 2987 flush_workqueue(dm->hpd_rx_offload_wq[i].wq); 2988 } 2989 } 2990 2991 static int dm_suspend(struct amdgpu_ip_block *ip_block) 2992 { 2993 struct amdgpu_device *adev = ip_block->adev; 2994 struct amdgpu_display_manager *dm = &adev->dm; 2995 int ret = 0; 2996 2997 if (amdgpu_in_reset(adev)) { 2998 mutex_lock(&dm->dc_lock); 2999 3000 dc_allow_idle_optimizations(adev->dm.dc, false); 3001 3002 dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state); 3003 3004 if (dm->cached_dc_state) 3005 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); 3006 3007 amdgpu_dm_commit_zero_streams(dm->dc); 3008 3009 amdgpu_dm_irq_suspend(adev); 3010 3011 hpd_rx_irq_work_suspend(dm); 3012 3013 return ret; 3014 } 3015 3016 WARN_ON(adev->dm.cached_state); 3017 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); 3018 if (IS_ERR(adev->dm.cached_state)) 3019 return PTR_ERR(adev->dm.cached_state); 3020 3021 s3_handle_mst(adev_to_drm(adev), true); 3022 3023 amdgpu_dm_irq_suspend(adev); 3024 3025 hpd_rx_irq_work_suspend(dm); 3026 3027 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); 3028 3029 if (dm->dc->caps.ips_support && adev->in_s0ix) 3030 dc_allow_idle_optimizations(dm->dc, true); 3031 3032 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); 3033 3034 return 0; 3035 } 3036 3037 struct drm_connector * 3038 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, 3039 struct drm_crtc *crtc) 3040 { 3041 u32 i; 3042 struct drm_connector_state *new_con_state; 3043 struct drm_connector *connector; 3044 struct drm_crtc *crtc_from_state; 3045 3046 for_each_new_connector_in_state(state, connector, new_con_state, i) { 3047 crtc_from_state = new_con_state->crtc; 3048 3049 if (crtc_from_state == crtc) 3050 return connector; 3051 } 3052 3053 return NULL; 3054 } 3055 3056 static void emulated_link_detect(struct dc_link *link) 3057 { 3058 struct dc_sink_init_data sink_init_data = { 0 }; 3059 struct display_sink_capability sink_caps = { 0 }; 3060 enum dc_edid_status edid_status; 3061 struct dc_context *dc_ctx = link->ctx; 3062 struct drm_device *dev = adev_to_drm(dc_ctx->driver_context); 3063 struct dc_sink *sink = NULL; 3064 struct dc_sink *prev_sink = NULL; 3065 3066 link->type = dc_connection_none; 3067 prev_sink = link->local_sink; 3068 3069 if (prev_sink) 3070 dc_sink_release(prev_sink); 3071 3072 switch (link->connector_signal) { 3073 case SIGNAL_TYPE_HDMI_TYPE_A: { 3074 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3075 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; 3076 break; 3077 } 3078 3079 case SIGNAL_TYPE_DVI_SINGLE_LINK: { 3080 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3081 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; 3082 break; 3083 } 3084 3085 case SIGNAL_TYPE_DVI_DUAL_LINK: { 3086 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3087 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; 3088 break; 3089 } 3090 3091 case SIGNAL_TYPE_LVDS: { 3092 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3093 sink_caps.signal = SIGNAL_TYPE_LVDS; 3094 break; 3095 } 3096 3097 case SIGNAL_TYPE_EDP: { 3098 sink_caps.transaction_type = 3099 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 3100 sink_caps.signal = SIGNAL_TYPE_EDP; 3101 break; 3102 } 3103 3104 case SIGNAL_TYPE_DISPLAY_PORT: { 3105 sink_caps.transaction_type = 3106 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 3107 sink_caps.signal = SIGNAL_TYPE_VIRTUAL; 3108 break; 3109 } 3110 3111 default: 3112 drm_err(dev, "Invalid connector type! signal:%d\n", 3113 link->connector_signal); 3114 return; 3115 } 3116 3117 sink_init_data.link = link; 3118 sink_init_data.sink_signal = sink_caps.signal; 3119 3120 sink = dc_sink_create(&sink_init_data); 3121 if (!sink) { 3122 drm_err(dev, "Failed to create sink!\n"); 3123 return; 3124 } 3125 3126 /* dc_sink_create returns a new reference */ 3127 link->local_sink = sink; 3128 3129 edid_status = dm_helpers_read_local_edid( 3130 link->ctx, 3131 link, 3132 sink); 3133 3134 if (edid_status != EDID_OK) 3135 drm_err(dev, "Failed to read EDID\n"); 3136 3137 } 3138 3139 static void dm_gpureset_commit_state(struct dc_state *dc_state, 3140 struct amdgpu_display_manager *dm) 3141 { 3142 struct { 3143 struct dc_surface_update surface_updates[MAX_SURFACES]; 3144 struct dc_plane_info plane_infos[MAX_SURFACES]; 3145 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 3146 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 3147 struct dc_stream_update stream_update; 3148 } *bundle; 3149 int k, m; 3150 3151 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 3152 3153 if (!bundle) { 3154 drm_err(dm->ddev, "Failed to allocate update bundle\n"); 3155 goto cleanup; 3156 } 3157 3158 for (k = 0; k < dc_state->stream_count; k++) { 3159 bundle->stream_update.stream = dc_state->streams[k]; 3160 3161 for (m = 0; m < dc_state->stream_status->plane_count; m++) { 3162 bundle->surface_updates[m].surface = 3163 dc_state->stream_status->plane_states[m]; 3164 bundle->surface_updates[m].surface->force_full_update = 3165 true; 3166 } 3167 3168 update_planes_and_stream_adapter(dm->dc, 3169 UPDATE_TYPE_FULL, 3170 dc_state->stream_status->plane_count, 3171 dc_state->streams[k], 3172 &bundle->stream_update, 3173 bundle->surface_updates); 3174 } 3175 3176 cleanup: 3177 kfree(bundle); 3178 } 3179 3180 static int dm_resume(struct amdgpu_ip_block *ip_block) 3181 { 3182 struct amdgpu_device *adev = ip_block->adev; 3183 struct drm_device *ddev = adev_to_drm(adev); 3184 struct amdgpu_display_manager *dm = &adev->dm; 3185 struct amdgpu_dm_connector *aconnector; 3186 struct drm_connector *connector; 3187 struct drm_connector_list_iter iter; 3188 struct drm_crtc *crtc; 3189 struct drm_crtc_state *new_crtc_state; 3190 struct dm_crtc_state *dm_new_crtc_state; 3191 struct drm_plane *plane; 3192 struct drm_plane_state *new_plane_state; 3193 struct dm_plane_state *dm_new_plane_state; 3194 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); 3195 enum dc_connection_type new_connection_type = dc_connection_none; 3196 struct dc_state *dc_state; 3197 int i, r, j; 3198 struct dc_commit_streams_params commit_params = {}; 3199 3200 if (dm->dc->caps.ips_support) { 3201 dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false); 3202 } 3203 3204 if (amdgpu_in_reset(adev)) { 3205 dc_state = dm->cached_dc_state; 3206 3207 /* 3208 * The dc->current_state is backed up into dm->cached_dc_state 3209 * before we commit 0 streams. 3210 * 3211 * DC will clear link encoder assignments on the real state 3212 * but the changes won't propagate over to the copy we made 3213 * before the 0 streams commit. 3214 * 3215 * DC expects that link encoder assignments are *not* valid 3216 * when committing a state, so as a workaround we can copy 3217 * off of the current state. 3218 * 3219 * We lose the previous assignments, but we had already 3220 * commit 0 streams anyway. 3221 */ 3222 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); 3223 3224 r = dm_dmub_hw_init(adev); 3225 if (r) 3226 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 3227 3228 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); 3229 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 3230 3231 dc_resume(dm->dc); 3232 3233 amdgpu_dm_irq_resume_early(adev); 3234 3235 for (i = 0; i < dc_state->stream_count; i++) { 3236 dc_state->streams[i]->mode_changed = true; 3237 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { 3238 dc_state->stream_status[i].plane_states[j]->update_flags.raw 3239 = 0xffffffff; 3240 } 3241 } 3242 3243 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 3244 amdgpu_dm_outbox_init(adev); 3245 dc_enable_dmub_outbox(adev->dm.dc); 3246 } 3247 3248 commit_params.streams = dc_state->streams; 3249 commit_params.stream_count = dc_state->stream_count; 3250 dc_exit_ips_for_hw_access(dm->dc); 3251 WARN_ON(!dc_commit_streams(dm->dc, &commit_params)); 3252 3253 dm_gpureset_commit_state(dm->cached_dc_state, dm); 3254 3255 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); 3256 3257 dc_state_release(dm->cached_dc_state); 3258 dm->cached_dc_state = NULL; 3259 3260 amdgpu_dm_irq_resume_late(adev); 3261 3262 mutex_unlock(&dm->dc_lock); 3263 3264 return 0; 3265 } 3266 /* Recreate dc_state - DC invalidates it when setting power state to S3. */ 3267 dc_state_release(dm_state->context); 3268 dm_state->context = dc_state_create(dm->dc, NULL); 3269 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ 3270 3271 /* Before powering on DC we need to re-initialize DMUB. */ 3272 dm_dmub_hw_resume(adev); 3273 3274 /* Re-enable outbox interrupts for DPIA. */ 3275 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 3276 amdgpu_dm_outbox_init(adev); 3277 dc_enable_dmub_outbox(adev->dm.dc); 3278 } 3279 3280 /* power on hardware */ 3281 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); 3282 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 3283 3284 /* program HPD filter */ 3285 dc_resume(dm->dc); 3286 3287 /* 3288 * early enable HPD Rx IRQ, should be done before set mode as short 3289 * pulse interrupts are used for MST 3290 */ 3291 amdgpu_dm_irq_resume_early(adev); 3292 3293 /* On resume we need to rewrite the MSTM control bits to enable MST*/ 3294 s3_handle_mst(ddev, false); 3295 3296 /* Do detection*/ 3297 drm_connector_list_iter_begin(ddev, &iter); 3298 drm_for_each_connector_iter(connector, &iter) { 3299 3300 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 3301 continue; 3302 3303 aconnector = to_amdgpu_dm_connector(connector); 3304 3305 if (!aconnector->dc_link) 3306 continue; 3307 3308 /* 3309 * this is the case when traversing through already created end sink 3310 * MST connectors, should be skipped 3311 */ 3312 if (aconnector->mst_root) 3313 continue; 3314 3315 mutex_lock(&aconnector->hpd_lock); 3316 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 3317 DRM_ERROR("KMS: Failed to detect connector\n"); 3318 3319 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3320 emulated_link_detect(aconnector->dc_link); 3321 } else { 3322 mutex_lock(&dm->dc_lock); 3323 dc_exit_ips_for_hw_access(dm->dc); 3324 dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4); 3325 mutex_unlock(&dm->dc_lock); 3326 } 3327 3328 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 3329 aconnector->fake_enable = false; 3330 3331 if (aconnector->dc_sink) 3332 dc_sink_release(aconnector->dc_sink); 3333 aconnector->dc_sink = NULL; 3334 amdgpu_dm_update_connector_after_detect(aconnector); 3335 mutex_unlock(&aconnector->hpd_lock); 3336 } 3337 drm_connector_list_iter_end(&iter); 3338 3339 /* Force mode set in atomic commit */ 3340 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 3341 new_crtc_state->active_changed = true; 3342 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 3343 reset_freesync_config_for_crtc(dm_new_crtc_state); 3344 } 3345 3346 /* 3347 * atomic_check is expected to create the dc states. We need to release 3348 * them here, since they were duplicated as part of the suspend 3349 * procedure. 3350 */ 3351 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 3352 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 3353 if (dm_new_crtc_state->stream) { 3354 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); 3355 dc_stream_release(dm_new_crtc_state->stream); 3356 dm_new_crtc_state->stream = NULL; 3357 } 3358 dm_new_crtc_state->base.color_mgmt_changed = true; 3359 } 3360 3361 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { 3362 dm_new_plane_state = to_dm_plane_state(new_plane_state); 3363 if (dm_new_plane_state->dc_state) { 3364 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); 3365 dc_plane_state_release(dm_new_plane_state->dc_state); 3366 dm_new_plane_state->dc_state = NULL; 3367 } 3368 } 3369 3370 drm_atomic_helper_resume(ddev, dm->cached_state); 3371 3372 dm->cached_state = NULL; 3373 3374 /* Do mst topology probing after resuming cached state*/ 3375 drm_connector_list_iter_begin(ddev, &iter); 3376 drm_for_each_connector_iter(connector, &iter) { 3377 3378 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 3379 continue; 3380 3381 aconnector = to_amdgpu_dm_connector(connector); 3382 if (aconnector->dc_link->type != dc_connection_mst_branch || 3383 aconnector->mst_root) 3384 continue; 3385 3386 drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); 3387 } 3388 drm_connector_list_iter_end(&iter); 3389 3390 amdgpu_dm_irq_resume_late(adev); 3391 3392 amdgpu_dm_smu_write_watermarks_table(adev); 3393 3394 drm_kms_helper_hotplug_event(ddev); 3395 3396 return 0; 3397 } 3398 3399 /** 3400 * DOC: DM Lifecycle 3401 * 3402 * DM (and consequently DC) is registered in the amdgpu base driver as a IP 3403 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to 3404 * the base driver's device list to be initialized and torn down accordingly. 3405 * 3406 * The functions to do so are provided as hooks in &struct amd_ip_funcs. 3407 */ 3408 3409 static const struct amd_ip_funcs amdgpu_dm_funcs = { 3410 .name = "dm", 3411 .early_init = dm_early_init, 3412 .late_init = dm_late_init, 3413 .sw_init = dm_sw_init, 3414 .sw_fini = dm_sw_fini, 3415 .early_fini = amdgpu_dm_early_fini, 3416 .hw_init = dm_hw_init, 3417 .hw_fini = dm_hw_fini, 3418 .suspend = dm_suspend, 3419 .resume = dm_resume, 3420 .is_idle = dm_is_idle, 3421 .wait_for_idle = dm_wait_for_idle, 3422 .check_soft_reset = dm_check_soft_reset, 3423 .soft_reset = dm_soft_reset, 3424 .set_clockgating_state = dm_set_clockgating_state, 3425 .set_powergating_state = dm_set_powergating_state, 3426 }; 3427 3428 const struct amdgpu_ip_block_version dm_ip_block = { 3429 .type = AMD_IP_BLOCK_TYPE_DCE, 3430 .major = 1, 3431 .minor = 0, 3432 .rev = 0, 3433 .funcs = &amdgpu_dm_funcs, 3434 }; 3435 3436 3437 /** 3438 * DOC: atomic 3439 * 3440 * *WIP* 3441 */ 3442 3443 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 3444 .fb_create = amdgpu_display_user_framebuffer_create, 3445 .get_format_info = amdgpu_dm_plane_get_format_info, 3446 .atomic_check = amdgpu_dm_atomic_check, 3447 .atomic_commit = drm_atomic_helper_commit, 3448 }; 3449 3450 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { 3451 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, 3452 .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, 3453 }; 3454 3455 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) 3456 { 3457 struct amdgpu_dm_backlight_caps *caps; 3458 struct drm_connector *conn_base; 3459 struct amdgpu_device *adev; 3460 struct drm_luminance_range_info *luminance_range; 3461 3462 if (aconnector->bl_idx == -1 || 3463 aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP) 3464 return; 3465 3466 conn_base = &aconnector->base; 3467 adev = drm_to_adev(conn_base->dev); 3468 3469 caps = &adev->dm.backlight_caps[aconnector->bl_idx]; 3470 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; 3471 caps->aux_support = false; 3472 3473 if (caps->ext_caps->bits.oled == 1 3474 /* 3475 * || 3476 * caps->ext_caps->bits.sdr_aux_backlight_control == 1 || 3477 * caps->ext_caps->bits.hdr_aux_backlight_control == 1 3478 */) 3479 caps->aux_support = true; 3480 3481 if (amdgpu_backlight == 0) 3482 caps->aux_support = false; 3483 else if (amdgpu_backlight == 1) 3484 caps->aux_support = true; 3485 if (caps->aux_support) 3486 aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX; 3487 3488 luminance_range = &conn_base->display_info.luminance_range; 3489 3490 if (luminance_range->max_luminance) { 3491 caps->aux_min_input_signal = luminance_range->min_luminance; 3492 caps->aux_max_input_signal = luminance_range->max_luminance; 3493 } else { 3494 caps->aux_min_input_signal = 0; 3495 caps->aux_max_input_signal = 512; 3496 } 3497 } 3498 3499 void amdgpu_dm_update_connector_after_detect( 3500 struct amdgpu_dm_connector *aconnector) 3501 { 3502 struct drm_connector *connector = &aconnector->base; 3503 struct drm_device *dev = connector->dev; 3504 struct dc_sink *sink; 3505 3506 /* MST handled by drm_mst framework */ 3507 if (aconnector->mst_mgr.mst_state == true) 3508 return; 3509 3510 sink = aconnector->dc_link->local_sink; 3511 if (sink) 3512 dc_sink_retain(sink); 3513 3514 /* 3515 * Edid mgmt connector gets first update only in mode_valid hook and then 3516 * the connector sink is set to either fake or physical sink depends on link status. 3517 * Skip if already done during boot. 3518 */ 3519 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED 3520 && aconnector->dc_em_sink) { 3521 3522 /* 3523 * For S3 resume with headless use eml_sink to fake stream 3524 * because on resume connector->sink is set to NULL 3525 */ 3526 mutex_lock(&dev->mode_config.mutex); 3527 3528 if (sink) { 3529 if (aconnector->dc_sink) { 3530 amdgpu_dm_update_freesync_caps(connector, NULL); 3531 /* 3532 * retain and release below are used to 3533 * bump up refcount for sink because the link doesn't point 3534 * to it anymore after disconnect, so on next crtc to connector 3535 * reshuffle by UMD we will get into unwanted dc_sink release 3536 */ 3537 dc_sink_release(aconnector->dc_sink); 3538 } 3539 aconnector->dc_sink = sink; 3540 dc_sink_retain(aconnector->dc_sink); 3541 amdgpu_dm_update_freesync_caps(connector, 3542 aconnector->drm_edid); 3543 } else { 3544 amdgpu_dm_update_freesync_caps(connector, NULL); 3545 if (!aconnector->dc_sink) { 3546 aconnector->dc_sink = aconnector->dc_em_sink; 3547 dc_sink_retain(aconnector->dc_sink); 3548 } 3549 } 3550 3551 mutex_unlock(&dev->mode_config.mutex); 3552 3553 if (sink) 3554 dc_sink_release(sink); 3555 return; 3556 } 3557 3558 /* 3559 * TODO: temporary guard to look for proper fix 3560 * if this sink is MST sink, we should not do anything 3561 */ 3562 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 3563 dc_sink_release(sink); 3564 return; 3565 } 3566 3567 if (aconnector->dc_sink == sink) { 3568 /* 3569 * We got a DP short pulse (Link Loss, DP CTS, etc...). 3570 * Do nothing!! 3571 */ 3572 drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n", 3573 aconnector->connector_id); 3574 if (sink) 3575 dc_sink_release(sink); 3576 return; 3577 } 3578 3579 drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", 3580 aconnector->connector_id, aconnector->dc_sink, sink); 3581 3582 mutex_lock(&dev->mode_config.mutex); 3583 3584 /* 3585 * 1. Update status of the drm connector 3586 * 2. Send an event and let userspace tell us what to do 3587 */ 3588 if (sink) { 3589 /* 3590 * TODO: check if we still need the S3 mode update workaround. 3591 * If yes, put it here. 3592 */ 3593 if (aconnector->dc_sink) { 3594 amdgpu_dm_update_freesync_caps(connector, NULL); 3595 dc_sink_release(aconnector->dc_sink); 3596 } 3597 3598 aconnector->dc_sink = sink; 3599 dc_sink_retain(aconnector->dc_sink); 3600 if (sink->dc_edid.length == 0) { 3601 aconnector->drm_edid = NULL; 3602 if (aconnector->dc_link->aux_mode) { 3603 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 3604 } 3605 } else { 3606 const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid; 3607 3608 aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length); 3609 drm_edid_connector_update(connector, aconnector->drm_edid); 3610 3611 if (aconnector->dc_link->aux_mode) 3612 drm_dp_cec_attach(&aconnector->dm_dp_aux.aux, 3613 connector->display_info.source_physical_address); 3614 } 3615 3616 if (!aconnector->timing_requested) { 3617 aconnector->timing_requested = 3618 kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); 3619 if (!aconnector->timing_requested) 3620 drm_err(dev, 3621 "failed to create aconnector->requested_timing\n"); 3622 } 3623 3624 amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid); 3625 update_connector_ext_caps(aconnector); 3626 } else { 3627 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 3628 amdgpu_dm_update_freesync_caps(connector, NULL); 3629 aconnector->num_modes = 0; 3630 dc_sink_release(aconnector->dc_sink); 3631 aconnector->dc_sink = NULL; 3632 drm_edid_free(aconnector->drm_edid); 3633 aconnector->drm_edid = NULL; 3634 kfree(aconnector->timing_requested); 3635 aconnector->timing_requested = NULL; 3636 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ 3637 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 3638 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 3639 } 3640 3641 mutex_unlock(&dev->mode_config.mutex); 3642 3643 update_subconnector_property(aconnector); 3644 3645 if (sink) 3646 dc_sink_release(sink); 3647 } 3648 3649 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) 3650 { 3651 struct drm_connector *connector = &aconnector->base; 3652 struct drm_device *dev = connector->dev; 3653 enum dc_connection_type new_connection_type = dc_connection_none; 3654 struct amdgpu_device *adev = drm_to_adev(dev); 3655 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 3656 struct dc *dc = aconnector->dc_link->ctx->dc; 3657 bool ret = false; 3658 3659 if (adev->dm.disable_hpd_irq) 3660 return; 3661 3662 /* 3663 * In case of failure or MST no need to update connector status or notify the OS 3664 * since (for MST case) MST does this in its own context. 3665 */ 3666 mutex_lock(&aconnector->hpd_lock); 3667 3668 if (adev->dm.hdcp_workqueue) { 3669 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 3670 dm_con_state->update_hdcp = true; 3671 } 3672 if (aconnector->fake_enable) 3673 aconnector->fake_enable = false; 3674 3675 aconnector->timing_changed = false; 3676 3677 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 3678 DRM_ERROR("KMS: Failed to detect connector\n"); 3679 3680 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3681 emulated_link_detect(aconnector->dc_link); 3682 3683 drm_modeset_lock_all(dev); 3684 dm_restore_drm_connector_state(dev, connector); 3685 drm_modeset_unlock_all(dev); 3686 3687 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3688 drm_kms_helper_connector_hotplug_event(connector); 3689 } else { 3690 mutex_lock(&adev->dm.dc_lock); 3691 dc_exit_ips_for_hw_access(dc); 3692 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 3693 mutex_unlock(&adev->dm.dc_lock); 3694 if (ret) { 3695 amdgpu_dm_update_connector_after_detect(aconnector); 3696 3697 drm_modeset_lock_all(dev); 3698 dm_restore_drm_connector_state(dev, connector); 3699 drm_modeset_unlock_all(dev); 3700 3701 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3702 drm_kms_helper_connector_hotplug_event(connector); 3703 } 3704 } 3705 mutex_unlock(&aconnector->hpd_lock); 3706 3707 } 3708 3709 static void handle_hpd_irq(void *param) 3710 { 3711 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3712 3713 handle_hpd_irq_helper(aconnector); 3714 3715 } 3716 3717 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, 3718 union hpd_irq_data hpd_irq_data) 3719 { 3720 struct hpd_rx_irq_offload_work *offload_work = 3721 kzalloc(sizeof(*offload_work), GFP_KERNEL); 3722 3723 if (!offload_work) { 3724 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); 3725 return; 3726 } 3727 3728 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); 3729 offload_work->data = hpd_irq_data; 3730 offload_work->offload_wq = offload_wq; 3731 3732 queue_work(offload_wq->wq, &offload_work->work); 3733 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); 3734 } 3735 3736 static void handle_hpd_rx_irq(void *param) 3737 { 3738 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3739 struct drm_connector *connector = &aconnector->base; 3740 struct drm_device *dev = connector->dev; 3741 struct dc_link *dc_link = aconnector->dc_link; 3742 bool is_mst_root_connector = aconnector->mst_mgr.mst_state; 3743 bool result = false; 3744 enum dc_connection_type new_connection_type = dc_connection_none; 3745 struct amdgpu_device *adev = drm_to_adev(dev); 3746 union hpd_irq_data hpd_irq_data; 3747 bool link_loss = false; 3748 bool has_left_work = false; 3749 int idx = dc_link->link_index; 3750 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; 3751 struct dc *dc = aconnector->dc_link->ctx->dc; 3752 3753 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); 3754 3755 if (adev->dm.disable_hpd_irq) 3756 return; 3757 3758 /* 3759 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio 3760 * conflict, after implement i2c helper, this mutex should be 3761 * retired. 3762 */ 3763 mutex_lock(&aconnector->hpd_lock); 3764 3765 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, 3766 &link_loss, true, &has_left_work); 3767 3768 if (!has_left_work) 3769 goto out; 3770 3771 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { 3772 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3773 goto out; 3774 } 3775 3776 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { 3777 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || 3778 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { 3779 bool skip = false; 3780 3781 /* 3782 * DOWN_REP_MSG_RDY is also handled by polling method 3783 * mgr->cbs->poll_hpd_irq() 3784 */ 3785 spin_lock(&offload_wq->offload_lock); 3786 skip = offload_wq->is_handling_mst_msg_rdy_event; 3787 3788 if (!skip) 3789 offload_wq->is_handling_mst_msg_rdy_event = true; 3790 3791 spin_unlock(&offload_wq->offload_lock); 3792 3793 if (!skip) 3794 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3795 3796 goto out; 3797 } 3798 3799 if (link_loss) { 3800 bool skip = false; 3801 3802 spin_lock(&offload_wq->offload_lock); 3803 skip = offload_wq->is_handling_link_loss; 3804 3805 if (!skip) 3806 offload_wq->is_handling_link_loss = true; 3807 3808 spin_unlock(&offload_wq->offload_lock); 3809 3810 if (!skip) 3811 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3812 3813 goto out; 3814 } 3815 } 3816 3817 out: 3818 if (result && !is_mst_root_connector) { 3819 /* Downstream Port status changed. */ 3820 if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) 3821 DRM_ERROR("KMS: Failed to detect connector\n"); 3822 3823 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3824 emulated_link_detect(dc_link); 3825 3826 if (aconnector->fake_enable) 3827 aconnector->fake_enable = false; 3828 3829 amdgpu_dm_update_connector_after_detect(aconnector); 3830 3831 3832 drm_modeset_lock_all(dev); 3833 dm_restore_drm_connector_state(dev, connector); 3834 drm_modeset_unlock_all(dev); 3835 3836 drm_kms_helper_connector_hotplug_event(connector); 3837 } else { 3838 bool ret = false; 3839 3840 mutex_lock(&adev->dm.dc_lock); 3841 dc_exit_ips_for_hw_access(dc); 3842 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX); 3843 mutex_unlock(&adev->dm.dc_lock); 3844 3845 if (ret) { 3846 if (aconnector->fake_enable) 3847 aconnector->fake_enable = false; 3848 3849 amdgpu_dm_update_connector_after_detect(aconnector); 3850 3851 drm_modeset_lock_all(dev); 3852 dm_restore_drm_connector_state(dev, connector); 3853 drm_modeset_unlock_all(dev); 3854 3855 drm_kms_helper_connector_hotplug_event(connector); 3856 } 3857 } 3858 } 3859 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { 3860 if (adev->dm.hdcp_workqueue) 3861 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); 3862 } 3863 3864 if (dc_link->type != dc_connection_mst_branch) 3865 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux); 3866 3867 mutex_unlock(&aconnector->hpd_lock); 3868 } 3869 3870 static int register_hpd_handlers(struct amdgpu_device *adev) 3871 { 3872 struct drm_device *dev = adev_to_drm(adev); 3873 struct drm_connector *connector; 3874 struct amdgpu_dm_connector *aconnector; 3875 const struct dc_link *dc_link; 3876 struct dc_interrupt_params int_params = {0}; 3877 3878 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3879 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3880 3881 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 3882 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, 3883 dmub_hpd_callback, true)) { 3884 DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 3885 return -EINVAL; 3886 } 3887 3888 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, 3889 dmub_hpd_callback, true)) { 3890 DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 3891 return -EINVAL; 3892 } 3893 3894 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY, 3895 dmub_hpd_sense_callback, true)) { 3896 DRM_ERROR("amdgpu: fail to register dmub hpd sense callback"); 3897 return -EINVAL; 3898 } 3899 } 3900 3901 list_for_each_entry(connector, 3902 &dev->mode_config.connector_list, head) { 3903 3904 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 3905 continue; 3906 3907 aconnector = to_amdgpu_dm_connector(connector); 3908 dc_link = aconnector->dc_link; 3909 3910 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { 3911 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3912 int_params.irq_source = dc_link->irq_source_hpd; 3913 3914 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 3915 int_params.irq_source < DC_IRQ_SOURCE_HPD1 || 3916 int_params.irq_source > DC_IRQ_SOURCE_HPD6) { 3917 DRM_ERROR("Failed to register hpd irq!\n"); 3918 return -EINVAL; 3919 } 3920 3921 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 3922 handle_hpd_irq, (void *) aconnector)) 3923 return -ENOMEM; 3924 } 3925 3926 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { 3927 3928 /* Also register for DP short pulse (hpd_rx). */ 3929 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3930 int_params.irq_source = dc_link->irq_source_hpd_rx; 3931 3932 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 3933 int_params.irq_source < DC_IRQ_SOURCE_HPD1RX || 3934 int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) { 3935 DRM_ERROR("Failed to register hpd rx irq!\n"); 3936 return -EINVAL; 3937 } 3938 3939 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 3940 handle_hpd_rx_irq, (void *) aconnector)) 3941 return -ENOMEM; 3942 } 3943 } 3944 return 0; 3945 } 3946 3947 #if defined(CONFIG_DRM_AMD_DC_SI) 3948 /* Register IRQ sources and initialize IRQ callbacks */ 3949 static int dce60_register_irq_handlers(struct amdgpu_device *adev) 3950 { 3951 struct dc *dc = adev->dm.dc; 3952 struct common_irq_params *c_irq_params; 3953 struct dc_interrupt_params int_params = {0}; 3954 int r; 3955 int i; 3956 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3957 3958 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3959 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3960 3961 /* 3962 * Actions of amdgpu_irq_add_id(): 3963 * 1. Register a set() function with base driver. 3964 * Base driver will call set() function to enable/disable an 3965 * interrupt in DC hardware. 3966 * 2. Register amdgpu_dm_irq_handler(). 3967 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3968 * coming from DC hardware. 3969 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3970 * for acknowledging and handling. 3971 */ 3972 3973 /* Use VBLANK interrupt */ 3974 for (i = 0; i < adev->mode_info.num_crtc; i++) { 3975 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq); 3976 if (r) { 3977 DRM_ERROR("Failed to add crtc irq id!\n"); 3978 return r; 3979 } 3980 3981 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3982 int_params.irq_source = 3983 dc_interrupt_to_irq_source(dc, i + 1, 0); 3984 3985 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 3986 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || 3987 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { 3988 DRM_ERROR("Failed to register vblank irq!\n"); 3989 return -EINVAL; 3990 } 3991 3992 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3993 3994 c_irq_params->adev = adev; 3995 c_irq_params->irq_src = int_params.irq_source; 3996 3997 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 3998 dm_crtc_high_irq, c_irq_params)) 3999 return -ENOMEM; 4000 } 4001 4002 /* Use GRPH_PFLIP interrupt */ 4003 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 4004 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 4005 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 4006 if (r) { 4007 DRM_ERROR("Failed to add page flip irq id!\n"); 4008 return r; 4009 } 4010 4011 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4012 int_params.irq_source = 4013 dc_interrupt_to_irq_source(dc, i, 0); 4014 4015 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4016 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || 4017 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { 4018 DRM_ERROR("Failed to register pflip irq!\n"); 4019 return -EINVAL; 4020 } 4021 4022 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 4023 4024 c_irq_params->adev = adev; 4025 c_irq_params->irq_src = int_params.irq_source; 4026 4027 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4028 dm_pflip_high_irq, c_irq_params)) 4029 return -ENOMEM; 4030 } 4031 4032 /* HPD */ 4033 r = amdgpu_irq_add_id(adev, client_id, 4034 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 4035 if (r) { 4036 DRM_ERROR("Failed to add hpd irq id!\n"); 4037 return r; 4038 } 4039 4040 r = register_hpd_handlers(adev); 4041 4042 return r; 4043 } 4044 #endif 4045 4046 /* Register IRQ sources and initialize IRQ callbacks */ 4047 static int dce110_register_irq_handlers(struct amdgpu_device *adev) 4048 { 4049 struct dc *dc = adev->dm.dc; 4050 struct common_irq_params *c_irq_params; 4051 struct dc_interrupt_params int_params = {0}; 4052 int r; 4053 int i; 4054 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 4055 4056 if (adev->family >= AMDGPU_FAMILY_AI) 4057 client_id = SOC15_IH_CLIENTID_DCE; 4058 4059 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4060 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4061 4062 /* 4063 * Actions of amdgpu_irq_add_id(): 4064 * 1. Register a set() function with base driver. 4065 * Base driver will call set() function to enable/disable an 4066 * interrupt in DC hardware. 4067 * 2. Register amdgpu_dm_irq_handler(). 4068 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 4069 * coming from DC hardware. 4070 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 4071 * for acknowledging and handling. 4072 */ 4073 4074 /* Use VBLANK interrupt */ 4075 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { 4076 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); 4077 if (r) { 4078 DRM_ERROR("Failed to add crtc irq id!\n"); 4079 return r; 4080 } 4081 4082 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4083 int_params.irq_source = 4084 dc_interrupt_to_irq_source(dc, i, 0); 4085 4086 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4087 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || 4088 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { 4089 DRM_ERROR("Failed to register vblank irq!\n"); 4090 return -EINVAL; 4091 } 4092 4093 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 4094 4095 c_irq_params->adev = adev; 4096 c_irq_params->irq_src = int_params.irq_source; 4097 4098 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4099 dm_crtc_high_irq, c_irq_params)) 4100 return -ENOMEM; 4101 } 4102 4103 /* Use VUPDATE interrupt */ 4104 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { 4105 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); 4106 if (r) { 4107 DRM_ERROR("Failed to add vupdate irq id!\n"); 4108 return r; 4109 } 4110 4111 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4112 int_params.irq_source = 4113 dc_interrupt_to_irq_source(dc, i, 0); 4114 4115 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4116 int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || 4117 int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { 4118 DRM_ERROR("Failed to register vupdate irq!\n"); 4119 return -EINVAL; 4120 } 4121 4122 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 4123 4124 c_irq_params->adev = adev; 4125 c_irq_params->irq_src = int_params.irq_source; 4126 4127 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4128 dm_vupdate_high_irq, c_irq_params)) 4129 return -ENOMEM; 4130 } 4131 4132 /* Use GRPH_PFLIP interrupt */ 4133 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 4134 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 4135 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 4136 if (r) { 4137 DRM_ERROR("Failed to add page flip irq id!\n"); 4138 return r; 4139 } 4140 4141 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4142 int_params.irq_source = 4143 dc_interrupt_to_irq_source(dc, i, 0); 4144 4145 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4146 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || 4147 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { 4148 DRM_ERROR("Failed to register pflip irq!\n"); 4149 return -EINVAL; 4150 } 4151 4152 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 4153 4154 c_irq_params->adev = adev; 4155 c_irq_params->irq_src = int_params.irq_source; 4156 4157 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4158 dm_pflip_high_irq, c_irq_params)) 4159 return -ENOMEM; 4160 } 4161 4162 /* HPD */ 4163 r = amdgpu_irq_add_id(adev, client_id, 4164 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 4165 if (r) { 4166 DRM_ERROR("Failed to add hpd irq id!\n"); 4167 return r; 4168 } 4169 4170 r = register_hpd_handlers(adev); 4171 4172 return r; 4173 } 4174 4175 /* Register IRQ sources and initialize IRQ callbacks */ 4176 static int dcn10_register_irq_handlers(struct amdgpu_device *adev) 4177 { 4178 struct dc *dc = adev->dm.dc; 4179 struct common_irq_params *c_irq_params; 4180 struct dc_interrupt_params int_params = {0}; 4181 int r; 4182 int i; 4183 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 4184 static const unsigned int vrtl_int_srcid[] = { 4185 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, 4186 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, 4187 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, 4188 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, 4189 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, 4190 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL 4191 }; 4192 #endif 4193 4194 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4195 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4196 4197 /* 4198 * Actions of amdgpu_irq_add_id(): 4199 * 1. Register a set() function with base driver. 4200 * Base driver will call set() function to enable/disable an 4201 * interrupt in DC hardware. 4202 * 2. Register amdgpu_dm_irq_handler(). 4203 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 4204 * coming from DC hardware. 4205 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 4206 * for acknowledging and handling. 4207 */ 4208 4209 /* Use VSTARTUP interrupt */ 4210 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 4211 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 4212 i++) { 4213 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); 4214 4215 if (r) { 4216 DRM_ERROR("Failed to add crtc irq id!\n"); 4217 return r; 4218 } 4219 4220 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4221 int_params.irq_source = 4222 dc_interrupt_to_irq_source(dc, i, 0); 4223 4224 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4225 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || 4226 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { 4227 DRM_ERROR("Failed to register vblank irq!\n"); 4228 return -EINVAL; 4229 } 4230 4231 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 4232 4233 c_irq_params->adev = adev; 4234 c_irq_params->irq_src = int_params.irq_source; 4235 4236 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4237 dm_crtc_high_irq, c_irq_params)) 4238 return -ENOMEM; 4239 } 4240 4241 /* Use otg vertical line interrupt */ 4242 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 4243 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { 4244 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, 4245 vrtl_int_srcid[i], &adev->vline0_irq); 4246 4247 if (r) { 4248 DRM_ERROR("Failed to add vline0 irq id!\n"); 4249 return r; 4250 } 4251 4252 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4253 int_params.irq_source = 4254 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); 4255 4256 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4257 int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 || 4258 int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) { 4259 DRM_ERROR("Failed to register vline0 irq!\n"); 4260 return -EINVAL; 4261 } 4262 4263 c_irq_params = &adev->dm.vline0_params[int_params.irq_source 4264 - DC_IRQ_SOURCE_DC1_VLINE0]; 4265 4266 c_irq_params->adev = adev; 4267 c_irq_params->irq_src = int_params.irq_source; 4268 4269 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4270 dm_dcn_vertical_interrupt0_high_irq, 4271 c_irq_params)) 4272 return -ENOMEM; 4273 } 4274 #endif 4275 4276 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to 4277 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx 4278 * to trigger at end of each vblank, regardless of state of the lock, 4279 * matching DCE behaviour. 4280 */ 4281 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; 4282 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; 4283 i++) { 4284 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); 4285 4286 if (r) { 4287 DRM_ERROR("Failed to add vupdate irq id!\n"); 4288 return r; 4289 } 4290 4291 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4292 int_params.irq_source = 4293 dc_interrupt_to_irq_source(dc, i, 0); 4294 4295 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4296 int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || 4297 int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { 4298 DRM_ERROR("Failed to register vupdate irq!\n"); 4299 return -EINVAL; 4300 } 4301 4302 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 4303 4304 c_irq_params->adev = adev; 4305 c_irq_params->irq_src = int_params.irq_source; 4306 4307 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4308 dm_vupdate_high_irq, c_irq_params)) 4309 return -ENOMEM; 4310 } 4311 4312 /* Use GRPH_PFLIP interrupt */ 4313 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 4314 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; 4315 i++) { 4316 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 4317 if (r) { 4318 DRM_ERROR("Failed to add page flip irq id!\n"); 4319 return r; 4320 } 4321 4322 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4323 int_params.irq_source = 4324 dc_interrupt_to_irq_source(dc, i, 0); 4325 4326 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4327 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || 4328 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { 4329 DRM_ERROR("Failed to register pflip irq!\n"); 4330 return -EINVAL; 4331 } 4332 4333 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 4334 4335 c_irq_params->adev = adev; 4336 c_irq_params->irq_src = int_params.irq_source; 4337 4338 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4339 dm_pflip_high_irq, c_irq_params)) 4340 return -ENOMEM; 4341 } 4342 4343 /* HPD */ 4344 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 4345 &adev->hpd_irq); 4346 if (r) { 4347 DRM_ERROR("Failed to add hpd irq id!\n"); 4348 return r; 4349 } 4350 4351 r = register_hpd_handlers(adev); 4352 4353 return r; 4354 } 4355 /* Register Outbox IRQ sources and initialize IRQ callbacks */ 4356 static int register_outbox_irq_handlers(struct amdgpu_device *adev) 4357 { 4358 struct dc *dc = adev->dm.dc; 4359 struct common_irq_params *c_irq_params; 4360 struct dc_interrupt_params int_params = {0}; 4361 int r, i; 4362 4363 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4364 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4365 4366 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, 4367 &adev->dmub_outbox_irq); 4368 if (r) { 4369 DRM_ERROR("Failed to add outbox irq id!\n"); 4370 return r; 4371 } 4372 4373 if (dc->ctx->dmub_srv) { 4374 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT; 4375 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 4376 int_params.irq_source = 4377 dc_interrupt_to_irq_source(dc, i, 0); 4378 4379 c_irq_params = &adev->dm.dmub_outbox_params[0]; 4380 4381 c_irq_params->adev = adev; 4382 c_irq_params->irq_src = int_params.irq_source; 4383 4384 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4385 dm_dmub_outbox1_low_irq, c_irq_params)) 4386 return -ENOMEM; 4387 } 4388 4389 return 0; 4390 } 4391 4392 /* 4393 * Acquires the lock for the atomic state object and returns 4394 * the new atomic state. 4395 * 4396 * This should only be called during atomic check. 4397 */ 4398 int dm_atomic_get_state(struct drm_atomic_state *state, 4399 struct dm_atomic_state **dm_state) 4400 { 4401 struct drm_device *dev = state->dev; 4402 struct amdgpu_device *adev = drm_to_adev(dev); 4403 struct amdgpu_display_manager *dm = &adev->dm; 4404 struct drm_private_state *priv_state; 4405 4406 if (*dm_state) 4407 return 0; 4408 4409 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); 4410 if (IS_ERR(priv_state)) 4411 return PTR_ERR(priv_state); 4412 4413 *dm_state = to_dm_atomic_state(priv_state); 4414 4415 return 0; 4416 } 4417 4418 static struct dm_atomic_state * 4419 dm_atomic_get_new_state(struct drm_atomic_state *state) 4420 { 4421 struct drm_device *dev = state->dev; 4422 struct amdgpu_device *adev = drm_to_adev(dev); 4423 struct amdgpu_display_manager *dm = &adev->dm; 4424 struct drm_private_obj *obj; 4425 struct drm_private_state *new_obj_state; 4426 int i; 4427 4428 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { 4429 if (obj->funcs == dm->atomic_obj.funcs) 4430 return to_dm_atomic_state(new_obj_state); 4431 } 4432 4433 return NULL; 4434 } 4435 4436 static struct drm_private_state * 4437 dm_atomic_duplicate_state(struct drm_private_obj *obj) 4438 { 4439 struct dm_atomic_state *old_state, *new_state; 4440 4441 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); 4442 if (!new_state) 4443 return NULL; 4444 4445 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); 4446 4447 old_state = to_dm_atomic_state(obj->state); 4448 4449 if (old_state && old_state->context) 4450 new_state->context = dc_state_create_copy(old_state->context); 4451 4452 if (!new_state->context) { 4453 kfree(new_state); 4454 return NULL; 4455 } 4456 4457 return &new_state->base; 4458 } 4459 4460 static void dm_atomic_destroy_state(struct drm_private_obj *obj, 4461 struct drm_private_state *state) 4462 { 4463 struct dm_atomic_state *dm_state = to_dm_atomic_state(state); 4464 4465 if (dm_state && dm_state->context) 4466 dc_state_release(dm_state->context); 4467 4468 kfree(dm_state); 4469 } 4470 4471 static struct drm_private_state_funcs dm_atomic_state_funcs = { 4472 .atomic_duplicate_state = dm_atomic_duplicate_state, 4473 .atomic_destroy_state = dm_atomic_destroy_state, 4474 }; 4475 4476 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) 4477 { 4478 struct dm_atomic_state *state; 4479 int r; 4480 4481 adev->mode_info.mode_config_initialized = true; 4482 4483 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; 4484 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; 4485 4486 adev_to_drm(adev)->mode_config.max_width = 16384; 4487 adev_to_drm(adev)->mode_config.max_height = 16384; 4488 4489 adev_to_drm(adev)->mode_config.preferred_depth = 24; 4490 if (adev->asic_type == CHIP_HAWAII) 4491 /* disable prefer shadow for now due to hibernation issues */ 4492 adev_to_drm(adev)->mode_config.prefer_shadow = 0; 4493 else 4494 adev_to_drm(adev)->mode_config.prefer_shadow = 1; 4495 /* indicates support for immediate flip */ 4496 adev_to_drm(adev)->mode_config.async_page_flip = true; 4497 4498 state = kzalloc(sizeof(*state), GFP_KERNEL); 4499 if (!state) 4500 return -ENOMEM; 4501 4502 state->context = dc_state_create_current_copy(adev->dm.dc); 4503 if (!state->context) { 4504 kfree(state); 4505 return -ENOMEM; 4506 } 4507 4508 drm_atomic_private_obj_init(adev_to_drm(adev), 4509 &adev->dm.atomic_obj, 4510 &state->base, 4511 &dm_atomic_state_funcs); 4512 4513 r = amdgpu_display_modeset_create_props(adev); 4514 if (r) { 4515 dc_state_release(state->context); 4516 kfree(state); 4517 return r; 4518 } 4519 4520 #ifdef AMD_PRIVATE_COLOR 4521 if (amdgpu_dm_create_color_properties(adev)) { 4522 dc_state_release(state->context); 4523 kfree(state); 4524 return -ENOMEM; 4525 } 4526 #endif 4527 4528 r = amdgpu_dm_audio_init(adev); 4529 if (r) { 4530 dc_state_release(state->context); 4531 kfree(state); 4532 return r; 4533 } 4534 4535 return 0; 4536 } 4537 4538 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 4539 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 4540 #define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2) 4541 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 4542 4543 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, 4544 int bl_idx) 4545 { 4546 #if defined(CONFIG_ACPI) 4547 struct amdgpu_dm_backlight_caps caps; 4548 4549 memset(&caps, 0, sizeof(caps)); 4550 4551 if (dm->backlight_caps[bl_idx].caps_valid) 4552 return; 4553 4554 amdgpu_acpi_get_backlight_caps(&caps); 4555 4556 /* validate the firmware value is sane */ 4557 if (caps.caps_valid) { 4558 int spread = caps.max_input_signal - caps.min_input_signal; 4559 4560 if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || 4561 caps.min_input_signal < 0 || 4562 spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || 4563 spread < AMDGPU_DM_MIN_SPREAD) { 4564 DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n", 4565 caps.min_input_signal, caps.max_input_signal); 4566 caps.caps_valid = false; 4567 } 4568 } 4569 4570 if (caps.caps_valid) { 4571 dm->backlight_caps[bl_idx].caps_valid = true; 4572 if (caps.aux_support) 4573 return; 4574 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; 4575 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal; 4576 } else { 4577 dm->backlight_caps[bl_idx].min_input_signal = 4578 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4579 dm->backlight_caps[bl_idx].max_input_signal = 4580 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4581 } 4582 #else 4583 if (dm->backlight_caps[bl_idx].aux_support) 4584 return; 4585 4586 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4587 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4588 #endif 4589 } 4590 4591 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, 4592 unsigned int *min, unsigned int *max) 4593 { 4594 if (!caps) 4595 return 0; 4596 4597 if (caps->aux_support) { 4598 // Firmware limits are in nits, DC API wants millinits. 4599 *max = 1000 * caps->aux_max_input_signal; 4600 *min = 1000 * caps->aux_min_input_signal; 4601 } else { 4602 // Firmware limits are 8-bit, PWM control is 16-bit. 4603 *max = 0x101 * caps->max_input_signal; 4604 *min = 0x101 * caps->min_input_signal; 4605 } 4606 return 1; 4607 } 4608 4609 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, 4610 uint32_t brightness) 4611 { 4612 unsigned int min, max; 4613 4614 if (!get_brightness_range(caps, &min, &max)) 4615 return brightness; 4616 4617 // Rescale 0..255 to min..max 4618 return min + DIV_ROUND_CLOSEST((max - min) * brightness, 4619 AMDGPU_MAX_BL_LEVEL); 4620 } 4621 4622 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, 4623 uint32_t brightness) 4624 { 4625 unsigned int min, max; 4626 4627 if (!get_brightness_range(caps, &min, &max)) 4628 return brightness; 4629 4630 if (brightness < min) 4631 return 0; 4632 // Rescale min..max to 0..255 4633 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), 4634 max - min); 4635 } 4636 4637 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, 4638 int bl_idx, 4639 u32 user_brightness) 4640 { 4641 struct amdgpu_dm_backlight_caps caps; 4642 struct dc_link *link; 4643 u32 brightness; 4644 bool rc, reallow_idle = false; 4645 4646 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4647 caps = dm->backlight_caps[bl_idx]; 4648 4649 dm->brightness[bl_idx] = user_brightness; 4650 /* update scratch register */ 4651 if (bl_idx == 0) 4652 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); 4653 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); 4654 link = (struct dc_link *)dm->backlight_link[bl_idx]; 4655 4656 /* Change brightness based on AUX property */ 4657 mutex_lock(&dm->dc_lock); 4658 if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) { 4659 dc_allow_idle_optimizations(dm->dc, false); 4660 reallow_idle = true; 4661 } 4662 4663 if (caps.aux_support) { 4664 rc = dc_link_set_backlight_level_nits(link, true, brightness, 4665 AUX_BL_DEFAULT_TRANSITION_TIME_MS); 4666 if (!rc) 4667 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); 4668 } else { 4669 struct set_backlight_level_params backlight_level_params = { 0 }; 4670 4671 backlight_level_params.backlight_pwm_u16_16 = brightness; 4672 backlight_level_params.transition_time_in_ms = 0; 4673 4674 rc = dc_link_set_backlight_level(link, &backlight_level_params); 4675 if (!rc) 4676 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); 4677 } 4678 4679 if (dm->dc->caps.ips_support && reallow_idle) 4680 dc_allow_idle_optimizations(dm->dc, true); 4681 4682 mutex_unlock(&dm->dc_lock); 4683 4684 if (rc) 4685 dm->actual_brightness[bl_idx] = user_brightness; 4686 } 4687 4688 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) 4689 { 4690 struct amdgpu_display_manager *dm = bl_get_data(bd); 4691 int i; 4692 4693 for (i = 0; i < dm->num_of_edps; i++) { 4694 if (bd == dm->backlight_dev[i]) 4695 break; 4696 } 4697 if (i >= AMDGPU_DM_MAX_NUM_EDP) 4698 i = 0; 4699 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness); 4700 4701 return 0; 4702 } 4703 4704 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, 4705 int bl_idx) 4706 { 4707 int ret; 4708 struct amdgpu_dm_backlight_caps caps; 4709 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; 4710 4711 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4712 caps = dm->backlight_caps[bl_idx]; 4713 4714 if (caps.aux_support) { 4715 u32 avg, peak; 4716 bool rc; 4717 4718 rc = dc_link_get_backlight_level_nits(link, &avg, &peak); 4719 if (!rc) 4720 return dm->brightness[bl_idx]; 4721 return convert_brightness_to_user(&caps, avg); 4722 } 4723 4724 ret = dc_link_get_backlight_level(link); 4725 4726 if (ret == DC_ERROR_UNEXPECTED) 4727 return dm->brightness[bl_idx]; 4728 4729 return convert_brightness_to_user(&caps, ret); 4730 } 4731 4732 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) 4733 { 4734 struct amdgpu_display_manager *dm = bl_get_data(bd); 4735 int i; 4736 4737 for (i = 0; i < dm->num_of_edps; i++) { 4738 if (bd == dm->backlight_dev[i]) 4739 break; 4740 } 4741 if (i >= AMDGPU_DM_MAX_NUM_EDP) 4742 i = 0; 4743 return amdgpu_dm_backlight_get_level(dm, i); 4744 } 4745 4746 static const struct backlight_ops amdgpu_dm_backlight_ops = { 4747 .options = BL_CORE_SUSPENDRESUME, 4748 .get_brightness = amdgpu_dm_backlight_get_brightness, 4749 .update_status = amdgpu_dm_backlight_update_status, 4750 }; 4751 4752 static void 4753 amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) 4754 { 4755 struct drm_device *drm = aconnector->base.dev; 4756 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; 4757 struct backlight_properties props = { 0 }; 4758 struct amdgpu_dm_backlight_caps caps = { 0 }; 4759 char bl_name[16]; 4760 4761 if (aconnector->bl_idx == -1) 4762 return; 4763 4764 if (!acpi_video_backlight_use_native()) { 4765 drm_info(drm, "Skipping amdgpu DM backlight registration\n"); 4766 /* Try registering an ACPI video backlight device instead. */ 4767 acpi_video_register_backlight(); 4768 return; 4769 } 4770 4771 amdgpu_acpi_get_backlight_caps(&caps); 4772 if (caps.caps_valid) { 4773 if (power_supply_is_system_supplied() > 0) 4774 props.brightness = caps.ac_level; 4775 else 4776 props.brightness = caps.dc_level; 4777 } else 4778 props.brightness = AMDGPU_MAX_BL_LEVEL; 4779 4780 props.max_brightness = AMDGPU_MAX_BL_LEVEL; 4781 props.type = BACKLIGHT_RAW; 4782 4783 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", 4784 drm->primary->index + aconnector->bl_idx); 4785 4786 dm->backlight_dev[aconnector->bl_idx] = 4787 backlight_device_register(bl_name, aconnector->base.kdev, dm, 4788 &amdgpu_dm_backlight_ops, &props); 4789 4790 if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { 4791 DRM_ERROR("DM: Backlight registration failed!\n"); 4792 dm->backlight_dev[aconnector->bl_idx] = NULL; 4793 } else 4794 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); 4795 } 4796 4797 static int initialize_plane(struct amdgpu_display_manager *dm, 4798 struct amdgpu_mode_info *mode_info, int plane_id, 4799 enum drm_plane_type plane_type, 4800 const struct dc_plane_cap *plane_cap) 4801 { 4802 struct drm_plane *plane; 4803 unsigned long possible_crtcs; 4804 int ret = 0; 4805 4806 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); 4807 if (!plane) { 4808 DRM_ERROR("KMS: Failed to allocate plane\n"); 4809 return -ENOMEM; 4810 } 4811 plane->type = plane_type; 4812 4813 /* 4814 * HACK: IGT tests expect that the primary plane for a CRTC 4815 * can only have one possible CRTC. Only expose support for 4816 * any CRTC if they're not going to be used as a primary plane 4817 * for a CRTC - like overlay or underlay planes. 4818 */ 4819 possible_crtcs = 1 << plane_id; 4820 if (plane_id >= dm->dc->caps.max_streams) 4821 possible_crtcs = 0xff; 4822 4823 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap); 4824 4825 if (ret) { 4826 DRM_ERROR("KMS: Failed to initialize plane\n"); 4827 kfree(plane); 4828 return ret; 4829 } 4830 4831 if (mode_info) 4832 mode_info->planes[plane_id] = plane; 4833 4834 return ret; 4835 } 4836 4837 4838 static void setup_backlight_device(struct amdgpu_display_manager *dm, 4839 struct amdgpu_dm_connector *aconnector) 4840 { 4841 struct dc_link *link = aconnector->dc_link; 4842 int bl_idx = dm->num_of_edps; 4843 4844 if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) || 4845 link->type == dc_connection_none) 4846 return; 4847 4848 if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) { 4849 drm_warn(adev_to_drm(dm->adev), "Too much eDP connections, skipping backlight setup for additional eDPs\n"); 4850 return; 4851 } 4852 4853 aconnector->bl_idx = bl_idx; 4854 4855 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4856 dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL; 4857 dm->backlight_link[bl_idx] = link; 4858 dm->num_of_edps++; 4859 4860 update_connector_ext_caps(aconnector); 4861 } 4862 4863 static void amdgpu_set_panel_orientation(struct drm_connector *connector); 4864 4865 /* 4866 * In this architecture, the association 4867 * connector -> encoder -> crtc 4868 * id not really requried. The crtc and connector will hold the 4869 * display_index as an abstraction to use with DAL component 4870 * 4871 * Returns 0 on success 4872 */ 4873 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) 4874 { 4875 struct amdgpu_display_manager *dm = &adev->dm; 4876 s32 i; 4877 struct amdgpu_dm_connector *aconnector = NULL; 4878 struct amdgpu_encoder *aencoder = NULL; 4879 struct amdgpu_mode_info *mode_info = &adev->mode_info; 4880 u32 link_cnt; 4881 s32 primary_planes; 4882 enum dc_connection_type new_connection_type = dc_connection_none; 4883 const struct dc_plane_cap *plane; 4884 bool psr_feature_enabled = false; 4885 bool replay_feature_enabled = false; 4886 int max_overlay = dm->dc->caps.max_slave_planes; 4887 4888 dm->display_indexes_num = dm->dc->caps.max_streams; 4889 /* Update the actual used number of crtc */ 4890 adev->mode_info.num_crtc = adev->dm.display_indexes_num; 4891 4892 amdgpu_dm_set_irq_funcs(adev); 4893 4894 link_cnt = dm->dc->caps.max_links; 4895 if (amdgpu_dm_mode_config_init(dm->adev)) { 4896 DRM_ERROR("DM: Failed to initialize mode config\n"); 4897 return -EINVAL; 4898 } 4899 4900 /* There is one primary plane per CRTC */ 4901 primary_planes = dm->dc->caps.max_streams; 4902 if (primary_planes > AMDGPU_MAX_PLANES) { 4903 DRM_ERROR("DM: Plane nums out of 6 planes\n"); 4904 return -EINVAL; 4905 } 4906 4907 /* 4908 * Initialize primary planes, implicit planes for legacy IOCTLS. 4909 * Order is reversed to match iteration order in atomic check. 4910 */ 4911 for (i = (primary_planes - 1); i >= 0; i--) { 4912 plane = &dm->dc->caps.planes[i]; 4913 4914 if (initialize_plane(dm, mode_info, i, 4915 DRM_PLANE_TYPE_PRIMARY, plane)) { 4916 DRM_ERROR("KMS: Failed to initialize primary plane\n"); 4917 goto fail; 4918 } 4919 } 4920 4921 /* 4922 * Initialize overlay planes, index starting after primary planes. 4923 * These planes have a higher DRM index than the primary planes since 4924 * they should be considered as having a higher z-order. 4925 * Order is reversed to match iteration order in atomic check. 4926 * 4927 * Only support DCN for now, and only expose one so we don't encourage 4928 * userspace to use up all the pipes. 4929 */ 4930 for (i = 0; i < dm->dc->caps.max_planes; ++i) { 4931 struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; 4932 4933 /* Do not create overlay if MPO disabled */ 4934 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO) 4935 break; 4936 4937 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) 4938 continue; 4939 4940 if (!plane->pixel_format_support.argb8888) 4941 continue; 4942 4943 if (max_overlay-- == 0) 4944 break; 4945 4946 if (initialize_plane(dm, NULL, primary_planes + i, 4947 DRM_PLANE_TYPE_OVERLAY, plane)) { 4948 DRM_ERROR("KMS: Failed to initialize overlay plane\n"); 4949 goto fail; 4950 } 4951 } 4952 4953 for (i = 0; i < dm->dc->caps.max_streams; i++) 4954 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { 4955 DRM_ERROR("KMS: Failed to initialize crtc\n"); 4956 goto fail; 4957 } 4958 4959 /* Use Outbox interrupt */ 4960 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4961 case IP_VERSION(3, 0, 0): 4962 case IP_VERSION(3, 1, 2): 4963 case IP_VERSION(3, 1, 3): 4964 case IP_VERSION(3, 1, 4): 4965 case IP_VERSION(3, 1, 5): 4966 case IP_VERSION(3, 1, 6): 4967 case IP_VERSION(3, 2, 0): 4968 case IP_VERSION(3, 2, 1): 4969 case IP_VERSION(2, 1, 0): 4970 case IP_VERSION(3, 5, 0): 4971 case IP_VERSION(3, 5, 1): 4972 case IP_VERSION(4, 0, 1): 4973 if (register_outbox_irq_handlers(dm->adev)) { 4974 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4975 goto fail; 4976 } 4977 break; 4978 default: 4979 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", 4980 amdgpu_ip_version(adev, DCE_HWIP, 0)); 4981 } 4982 4983 /* Determine whether to enable PSR support by default. */ 4984 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { 4985 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4986 case IP_VERSION(3, 1, 2): 4987 case IP_VERSION(3, 1, 3): 4988 case IP_VERSION(3, 1, 4): 4989 case IP_VERSION(3, 1, 5): 4990 case IP_VERSION(3, 1, 6): 4991 case IP_VERSION(3, 2, 0): 4992 case IP_VERSION(3, 2, 1): 4993 case IP_VERSION(3, 5, 0): 4994 case IP_VERSION(3, 5, 1): 4995 case IP_VERSION(4, 0, 1): 4996 psr_feature_enabled = true; 4997 break; 4998 default: 4999 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK; 5000 break; 5001 } 5002 } 5003 5004 /* Determine whether to enable Replay support by default. */ 5005 if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { 5006 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5007 case IP_VERSION(3, 1, 4): 5008 case IP_VERSION(3, 2, 0): 5009 case IP_VERSION(3, 2, 1): 5010 case IP_VERSION(3, 5, 0): 5011 case IP_VERSION(3, 5, 1): 5012 replay_feature_enabled = true; 5013 break; 5014 5015 default: 5016 replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK; 5017 break; 5018 } 5019 } 5020 5021 if (link_cnt > MAX_LINKS) { 5022 DRM_ERROR( 5023 "KMS: Cannot support more than %d display indexes\n", 5024 MAX_LINKS); 5025 goto fail; 5026 } 5027 5028 /* loops over all connectors on the board */ 5029 for (i = 0; i < link_cnt; i++) { 5030 struct dc_link *link = NULL; 5031 5032 link = dc_get_link_at_index(dm->dc, i); 5033 5034 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) { 5035 struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL); 5036 5037 if (!wbcon) { 5038 DRM_ERROR("KMS: Failed to allocate writeback connector\n"); 5039 continue; 5040 } 5041 5042 if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) { 5043 DRM_ERROR("KMS: Failed to initialize writeback connector\n"); 5044 kfree(wbcon); 5045 continue; 5046 } 5047 5048 link->psr_settings.psr_feature_enabled = false; 5049 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; 5050 5051 continue; 5052 } 5053 5054 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 5055 if (!aconnector) 5056 goto fail; 5057 5058 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); 5059 if (!aencoder) 5060 goto fail; 5061 5062 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { 5063 DRM_ERROR("KMS: Failed to initialize encoder\n"); 5064 goto fail; 5065 } 5066 5067 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { 5068 DRM_ERROR("KMS: Failed to initialize connector\n"); 5069 goto fail; 5070 } 5071 5072 if (dm->hpd_rx_offload_wq) 5073 dm->hpd_rx_offload_wq[aconnector->base.index].aconnector = 5074 aconnector; 5075 5076 if (!dc_link_detect_connection_type(link, &new_connection_type)) 5077 DRM_ERROR("KMS: Failed to detect connector\n"); 5078 5079 if (aconnector->base.force && new_connection_type == dc_connection_none) { 5080 emulated_link_detect(link); 5081 amdgpu_dm_update_connector_after_detect(aconnector); 5082 } else { 5083 bool ret = false; 5084 5085 mutex_lock(&dm->dc_lock); 5086 dc_exit_ips_for_hw_access(dm->dc); 5087 ret = dc_link_detect(link, DETECT_REASON_BOOT); 5088 mutex_unlock(&dm->dc_lock); 5089 5090 if (ret) { 5091 amdgpu_dm_update_connector_after_detect(aconnector); 5092 setup_backlight_device(dm, aconnector); 5093 5094 /* Disable PSR if Replay can be enabled */ 5095 if (replay_feature_enabled) 5096 if (amdgpu_dm_set_replay_caps(link, aconnector)) 5097 psr_feature_enabled = false; 5098 5099 if (psr_feature_enabled) 5100 amdgpu_dm_set_psr_caps(link); 5101 } 5102 } 5103 amdgpu_set_panel_orientation(&aconnector->base); 5104 } 5105 5106 /* Software is initialized. Now we can register interrupt handlers. */ 5107 switch (adev->asic_type) { 5108 #if defined(CONFIG_DRM_AMD_DC_SI) 5109 case CHIP_TAHITI: 5110 case CHIP_PITCAIRN: 5111 case CHIP_VERDE: 5112 case CHIP_OLAND: 5113 if (dce60_register_irq_handlers(dm->adev)) { 5114 DRM_ERROR("DM: Failed to initialize IRQ\n"); 5115 goto fail; 5116 } 5117 break; 5118 #endif 5119 case CHIP_BONAIRE: 5120 case CHIP_HAWAII: 5121 case CHIP_KAVERI: 5122 case CHIP_KABINI: 5123 case CHIP_MULLINS: 5124 case CHIP_TONGA: 5125 case CHIP_FIJI: 5126 case CHIP_CARRIZO: 5127 case CHIP_STONEY: 5128 case CHIP_POLARIS11: 5129 case CHIP_POLARIS10: 5130 case CHIP_POLARIS12: 5131 case CHIP_VEGAM: 5132 case CHIP_VEGA10: 5133 case CHIP_VEGA12: 5134 case CHIP_VEGA20: 5135 if (dce110_register_irq_handlers(dm->adev)) { 5136 DRM_ERROR("DM: Failed to initialize IRQ\n"); 5137 goto fail; 5138 } 5139 break; 5140 default: 5141 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5142 case IP_VERSION(1, 0, 0): 5143 case IP_VERSION(1, 0, 1): 5144 case IP_VERSION(2, 0, 2): 5145 case IP_VERSION(2, 0, 3): 5146 case IP_VERSION(2, 0, 0): 5147 case IP_VERSION(2, 1, 0): 5148 case IP_VERSION(3, 0, 0): 5149 case IP_VERSION(3, 0, 2): 5150 case IP_VERSION(3, 0, 3): 5151 case IP_VERSION(3, 0, 1): 5152 case IP_VERSION(3, 1, 2): 5153 case IP_VERSION(3, 1, 3): 5154 case IP_VERSION(3, 1, 4): 5155 case IP_VERSION(3, 1, 5): 5156 case IP_VERSION(3, 1, 6): 5157 case IP_VERSION(3, 2, 0): 5158 case IP_VERSION(3, 2, 1): 5159 case IP_VERSION(3, 5, 0): 5160 case IP_VERSION(3, 5, 1): 5161 case IP_VERSION(4, 0, 1): 5162 if (dcn10_register_irq_handlers(dm->adev)) { 5163 DRM_ERROR("DM: Failed to initialize IRQ\n"); 5164 goto fail; 5165 } 5166 break; 5167 default: 5168 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", 5169 amdgpu_ip_version(adev, DCE_HWIP, 0)); 5170 goto fail; 5171 } 5172 break; 5173 } 5174 5175 return 0; 5176 fail: 5177 kfree(aencoder); 5178 kfree(aconnector); 5179 5180 return -EINVAL; 5181 } 5182 5183 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) 5184 { 5185 drm_atomic_private_obj_fini(&dm->atomic_obj); 5186 } 5187 5188 /****************************************************************************** 5189 * amdgpu_display_funcs functions 5190 *****************************************************************************/ 5191 5192 /* 5193 * dm_bandwidth_update - program display watermarks 5194 * 5195 * @adev: amdgpu_device pointer 5196 * 5197 * Calculate and program the display watermarks and line buffer allocation. 5198 */ 5199 static void dm_bandwidth_update(struct amdgpu_device *adev) 5200 { 5201 /* TODO: implement later */ 5202 } 5203 5204 static const struct amdgpu_display_funcs dm_display_funcs = { 5205 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ 5206 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ 5207 .backlight_set_level = NULL, /* never called for DC */ 5208 .backlight_get_level = NULL, /* never called for DC */ 5209 .hpd_sense = NULL,/* called unconditionally */ 5210 .hpd_set_polarity = NULL, /* called unconditionally */ 5211 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ 5212 .page_flip_get_scanoutpos = 5213 dm_crtc_get_scanoutpos,/* called unconditionally */ 5214 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ 5215 .add_connector = NULL, /* VBIOS parsing. DAL does it. */ 5216 }; 5217 5218 #if defined(CONFIG_DEBUG_KERNEL_DC) 5219 5220 static ssize_t s3_debug_store(struct device *device, 5221 struct device_attribute *attr, 5222 const char *buf, 5223 size_t count) 5224 { 5225 int ret; 5226 int s3_state; 5227 struct drm_device *drm_dev = dev_get_drvdata(device); 5228 struct amdgpu_device *adev = drm_to_adev(drm_dev); 5229 struct amdgpu_ip_block *ip_block; 5230 5231 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE); 5232 if (!ip_block) 5233 return -EINVAL; 5234 5235 ret = kstrtoint(buf, 0, &s3_state); 5236 5237 if (ret == 0) { 5238 if (s3_state) { 5239 dm_resume(ip_block); 5240 drm_kms_helper_hotplug_event(adev_to_drm(adev)); 5241 } else 5242 dm_suspend(ip_block); 5243 } 5244 5245 return ret == 0 ? count : 0; 5246 } 5247 5248 DEVICE_ATTR_WO(s3_debug); 5249 5250 #endif 5251 5252 static int dm_init_microcode(struct amdgpu_device *adev) 5253 { 5254 char *fw_name_dmub; 5255 int r; 5256 5257 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5258 case IP_VERSION(2, 1, 0): 5259 fw_name_dmub = FIRMWARE_RENOIR_DMUB; 5260 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) 5261 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; 5262 break; 5263 case IP_VERSION(3, 0, 0): 5264 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0)) 5265 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; 5266 else 5267 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; 5268 break; 5269 case IP_VERSION(3, 0, 1): 5270 fw_name_dmub = FIRMWARE_VANGOGH_DMUB; 5271 break; 5272 case IP_VERSION(3, 0, 2): 5273 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; 5274 break; 5275 case IP_VERSION(3, 0, 3): 5276 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; 5277 break; 5278 case IP_VERSION(3, 1, 2): 5279 case IP_VERSION(3, 1, 3): 5280 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; 5281 break; 5282 case IP_VERSION(3, 1, 4): 5283 fw_name_dmub = FIRMWARE_DCN_314_DMUB; 5284 break; 5285 case IP_VERSION(3, 1, 5): 5286 fw_name_dmub = FIRMWARE_DCN_315_DMUB; 5287 break; 5288 case IP_VERSION(3, 1, 6): 5289 fw_name_dmub = FIRMWARE_DCN316_DMUB; 5290 break; 5291 case IP_VERSION(3, 2, 0): 5292 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; 5293 break; 5294 case IP_VERSION(3, 2, 1): 5295 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; 5296 break; 5297 case IP_VERSION(3, 5, 0): 5298 fw_name_dmub = FIRMWARE_DCN_35_DMUB; 5299 break; 5300 case IP_VERSION(3, 5, 1): 5301 fw_name_dmub = FIRMWARE_DCN_351_DMUB; 5302 break; 5303 case IP_VERSION(4, 0, 1): 5304 fw_name_dmub = FIRMWARE_DCN_401_DMUB; 5305 break; 5306 default: 5307 /* ASIC doesn't support DMUB. */ 5308 return 0; 5309 } 5310 r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, AMDGPU_UCODE_REQUIRED, 5311 "%s", fw_name_dmub); 5312 return r; 5313 } 5314 5315 static int dm_early_init(struct amdgpu_ip_block *ip_block) 5316 { 5317 struct amdgpu_device *adev = ip_block->adev; 5318 struct amdgpu_mode_info *mode_info = &adev->mode_info; 5319 struct atom_context *ctx = mode_info->atom_context; 5320 int index = GetIndexIntoMasterTable(DATA, Object_Header); 5321 u16 data_offset; 5322 5323 /* if there is no object header, skip DM */ 5324 if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { 5325 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 5326 dev_info(adev->dev, "No object header, skipping DM\n"); 5327 return -ENOENT; 5328 } 5329 5330 switch (adev->asic_type) { 5331 #if defined(CONFIG_DRM_AMD_DC_SI) 5332 case CHIP_TAHITI: 5333 case CHIP_PITCAIRN: 5334 case CHIP_VERDE: 5335 adev->mode_info.num_crtc = 6; 5336 adev->mode_info.num_hpd = 6; 5337 adev->mode_info.num_dig = 6; 5338 break; 5339 case CHIP_OLAND: 5340 adev->mode_info.num_crtc = 2; 5341 adev->mode_info.num_hpd = 2; 5342 adev->mode_info.num_dig = 2; 5343 break; 5344 #endif 5345 case CHIP_BONAIRE: 5346 case CHIP_HAWAII: 5347 adev->mode_info.num_crtc = 6; 5348 adev->mode_info.num_hpd = 6; 5349 adev->mode_info.num_dig = 6; 5350 break; 5351 case CHIP_KAVERI: 5352 adev->mode_info.num_crtc = 4; 5353 adev->mode_info.num_hpd = 6; 5354 adev->mode_info.num_dig = 7; 5355 break; 5356 case CHIP_KABINI: 5357 case CHIP_MULLINS: 5358 adev->mode_info.num_crtc = 2; 5359 adev->mode_info.num_hpd = 6; 5360 adev->mode_info.num_dig = 6; 5361 break; 5362 case CHIP_FIJI: 5363 case CHIP_TONGA: 5364 adev->mode_info.num_crtc = 6; 5365 adev->mode_info.num_hpd = 6; 5366 adev->mode_info.num_dig = 7; 5367 break; 5368 case CHIP_CARRIZO: 5369 adev->mode_info.num_crtc = 3; 5370 adev->mode_info.num_hpd = 6; 5371 adev->mode_info.num_dig = 9; 5372 break; 5373 case CHIP_STONEY: 5374 adev->mode_info.num_crtc = 2; 5375 adev->mode_info.num_hpd = 6; 5376 adev->mode_info.num_dig = 9; 5377 break; 5378 case CHIP_POLARIS11: 5379 case CHIP_POLARIS12: 5380 adev->mode_info.num_crtc = 5; 5381 adev->mode_info.num_hpd = 5; 5382 adev->mode_info.num_dig = 5; 5383 break; 5384 case CHIP_POLARIS10: 5385 case CHIP_VEGAM: 5386 adev->mode_info.num_crtc = 6; 5387 adev->mode_info.num_hpd = 6; 5388 adev->mode_info.num_dig = 6; 5389 break; 5390 case CHIP_VEGA10: 5391 case CHIP_VEGA12: 5392 case CHIP_VEGA20: 5393 adev->mode_info.num_crtc = 6; 5394 adev->mode_info.num_hpd = 6; 5395 adev->mode_info.num_dig = 6; 5396 break; 5397 default: 5398 5399 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5400 case IP_VERSION(2, 0, 2): 5401 case IP_VERSION(3, 0, 0): 5402 adev->mode_info.num_crtc = 6; 5403 adev->mode_info.num_hpd = 6; 5404 adev->mode_info.num_dig = 6; 5405 break; 5406 case IP_VERSION(2, 0, 0): 5407 case IP_VERSION(3, 0, 2): 5408 adev->mode_info.num_crtc = 5; 5409 adev->mode_info.num_hpd = 5; 5410 adev->mode_info.num_dig = 5; 5411 break; 5412 case IP_VERSION(2, 0, 3): 5413 case IP_VERSION(3, 0, 3): 5414 adev->mode_info.num_crtc = 2; 5415 adev->mode_info.num_hpd = 2; 5416 adev->mode_info.num_dig = 2; 5417 break; 5418 case IP_VERSION(1, 0, 0): 5419 case IP_VERSION(1, 0, 1): 5420 case IP_VERSION(3, 0, 1): 5421 case IP_VERSION(2, 1, 0): 5422 case IP_VERSION(3, 1, 2): 5423 case IP_VERSION(3, 1, 3): 5424 case IP_VERSION(3, 1, 4): 5425 case IP_VERSION(3, 1, 5): 5426 case IP_VERSION(3, 1, 6): 5427 case IP_VERSION(3, 2, 0): 5428 case IP_VERSION(3, 2, 1): 5429 case IP_VERSION(3, 5, 0): 5430 case IP_VERSION(3, 5, 1): 5431 case IP_VERSION(4, 0, 1): 5432 adev->mode_info.num_crtc = 4; 5433 adev->mode_info.num_hpd = 4; 5434 adev->mode_info.num_dig = 4; 5435 break; 5436 default: 5437 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", 5438 amdgpu_ip_version(adev, DCE_HWIP, 0)); 5439 return -EINVAL; 5440 } 5441 break; 5442 } 5443 5444 if (adev->mode_info.funcs == NULL) 5445 adev->mode_info.funcs = &dm_display_funcs; 5446 5447 /* 5448 * Note: Do NOT change adev->audio_endpt_rreg and 5449 * adev->audio_endpt_wreg because they are initialised in 5450 * amdgpu_device_init() 5451 */ 5452 #if defined(CONFIG_DEBUG_KERNEL_DC) 5453 device_create_file( 5454 adev_to_drm(adev)->dev, 5455 &dev_attr_s3_debug); 5456 #endif 5457 adev->dc_enabled = true; 5458 5459 return dm_init_microcode(adev); 5460 } 5461 5462 static bool modereset_required(struct drm_crtc_state *crtc_state) 5463 { 5464 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 5465 } 5466 5467 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 5468 { 5469 drm_encoder_cleanup(encoder); 5470 kfree(encoder); 5471 } 5472 5473 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 5474 .destroy = amdgpu_dm_encoder_destroy, 5475 }; 5476 5477 static int 5478 fill_plane_color_attributes(const struct drm_plane_state *plane_state, 5479 const enum surface_pixel_format format, 5480 enum dc_color_space *color_space) 5481 { 5482 bool full_range; 5483 5484 *color_space = COLOR_SPACE_SRGB; 5485 5486 /* DRM color properties only affect non-RGB formats. */ 5487 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 5488 return 0; 5489 5490 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); 5491 5492 switch (plane_state->color_encoding) { 5493 case DRM_COLOR_YCBCR_BT601: 5494 if (full_range) 5495 *color_space = COLOR_SPACE_YCBCR601; 5496 else 5497 *color_space = COLOR_SPACE_YCBCR601_LIMITED; 5498 break; 5499 5500 case DRM_COLOR_YCBCR_BT709: 5501 if (full_range) 5502 *color_space = COLOR_SPACE_YCBCR709; 5503 else 5504 *color_space = COLOR_SPACE_YCBCR709_LIMITED; 5505 break; 5506 5507 case DRM_COLOR_YCBCR_BT2020: 5508 if (full_range) 5509 *color_space = COLOR_SPACE_2020_YCBCR; 5510 else 5511 return -EINVAL; 5512 break; 5513 5514 default: 5515 return -EINVAL; 5516 } 5517 5518 return 0; 5519 } 5520 5521 static int 5522 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 5523 const struct drm_plane_state *plane_state, 5524 const u64 tiling_flags, 5525 struct dc_plane_info *plane_info, 5526 struct dc_plane_address *address, 5527 bool tmz_surface) 5528 { 5529 const struct drm_framebuffer *fb = plane_state->fb; 5530 const struct amdgpu_framebuffer *afb = 5531 to_amdgpu_framebuffer(plane_state->fb); 5532 int ret; 5533 5534 memset(plane_info, 0, sizeof(*plane_info)); 5535 5536 switch (fb->format->format) { 5537 case DRM_FORMAT_C8: 5538 plane_info->format = 5539 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; 5540 break; 5541 case DRM_FORMAT_RGB565: 5542 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; 5543 break; 5544 case DRM_FORMAT_XRGB8888: 5545 case DRM_FORMAT_ARGB8888: 5546 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 5547 break; 5548 case DRM_FORMAT_XRGB2101010: 5549 case DRM_FORMAT_ARGB2101010: 5550 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; 5551 break; 5552 case DRM_FORMAT_XBGR2101010: 5553 case DRM_FORMAT_ABGR2101010: 5554 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; 5555 break; 5556 case DRM_FORMAT_XBGR8888: 5557 case DRM_FORMAT_ABGR8888: 5558 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; 5559 break; 5560 case DRM_FORMAT_NV21: 5561 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; 5562 break; 5563 case DRM_FORMAT_NV12: 5564 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; 5565 break; 5566 case DRM_FORMAT_P010: 5567 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; 5568 break; 5569 case DRM_FORMAT_XRGB16161616F: 5570 case DRM_FORMAT_ARGB16161616F: 5571 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; 5572 break; 5573 case DRM_FORMAT_XBGR16161616F: 5574 case DRM_FORMAT_ABGR16161616F: 5575 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; 5576 break; 5577 case DRM_FORMAT_XRGB16161616: 5578 case DRM_FORMAT_ARGB16161616: 5579 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; 5580 break; 5581 case DRM_FORMAT_XBGR16161616: 5582 case DRM_FORMAT_ABGR16161616: 5583 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; 5584 break; 5585 default: 5586 DRM_ERROR( 5587 "Unsupported screen format %p4cc\n", 5588 &fb->format->format); 5589 return -EINVAL; 5590 } 5591 5592 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 5593 case DRM_MODE_ROTATE_0: 5594 plane_info->rotation = ROTATION_ANGLE_0; 5595 break; 5596 case DRM_MODE_ROTATE_90: 5597 plane_info->rotation = ROTATION_ANGLE_90; 5598 break; 5599 case DRM_MODE_ROTATE_180: 5600 plane_info->rotation = ROTATION_ANGLE_180; 5601 break; 5602 case DRM_MODE_ROTATE_270: 5603 plane_info->rotation = ROTATION_ANGLE_270; 5604 break; 5605 default: 5606 plane_info->rotation = ROTATION_ANGLE_0; 5607 break; 5608 } 5609 5610 5611 plane_info->visible = true; 5612 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; 5613 5614 plane_info->layer_index = plane_state->normalized_zpos; 5615 5616 ret = fill_plane_color_attributes(plane_state, plane_info->format, 5617 &plane_info->color_space); 5618 if (ret) 5619 return ret; 5620 5621 ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format, 5622 plane_info->rotation, tiling_flags, 5623 &plane_info->tiling_info, 5624 &plane_info->plane_size, 5625 &plane_info->dcc, address, 5626 tmz_surface); 5627 if (ret) 5628 return ret; 5629 5630 amdgpu_dm_plane_fill_blending_from_plane_state( 5631 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha, 5632 &plane_info->global_alpha, &plane_info->global_alpha_value); 5633 5634 return 0; 5635 } 5636 5637 static int fill_dc_plane_attributes(struct amdgpu_device *adev, 5638 struct dc_plane_state *dc_plane_state, 5639 struct drm_plane_state *plane_state, 5640 struct drm_crtc_state *crtc_state) 5641 { 5642 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 5643 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb; 5644 struct dc_scaling_info scaling_info; 5645 struct dc_plane_info plane_info; 5646 int ret; 5647 5648 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info); 5649 if (ret) 5650 return ret; 5651 5652 dc_plane_state->src_rect = scaling_info.src_rect; 5653 dc_plane_state->dst_rect = scaling_info.dst_rect; 5654 dc_plane_state->clip_rect = scaling_info.clip_rect; 5655 dc_plane_state->scaling_quality = scaling_info.scaling_quality; 5656 5657 ret = fill_dc_plane_info_and_addr(adev, plane_state, 5658 afb->tiling_flags, 5659 &plane_info, 5660 &dc_plane_state->address, 5661 afb->tmz_surface); 5662 if (ret) 5663 return ret; 5664 5665 dc_plane_state->format = plane_info.format; 5666 dc_plane_state->color_space = plane_info.color_space; 5667 dc_plane_state->format = plane_info.format; 5668 dc_plane_state->plane_size = plane_info.plane_size; 5669 dc_plane_state->rotation = plane_info.rotation; 5670 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror; 5671 dc_plane_state->stereo_format = plane_info.stereo_format; 5672 dc_plane_state->tiling_info = plane_info.tiling_info; 5673 dc_plane_state->visible = plane_info.visible; 5674 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha; 5675 dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha; 5676 dc_plane_state->global_alpha = plane_info.global_alpha; 5677 dc_plane_state->global_alpha_value = plane_info.global_alpha_value; 5678 dc_plane_state->dcc = plane_info.dcc; 5679 dc_plane_state->layer_index = plane_info.layer_index; 5680 dc_plane_state->flip_int_enabled = true; 5681 5682 /* 5683 * Always set input transfer function, since plane state is refreshed 5684 * every time. 5685 */ 5686 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, 5687 plane_state, 5688 dc_plane_state); 5689 if (ret) 5690 return ret; 5691 5692 return 0; 5693 } 5694 5695 static inline void fill_dc_dirty_rect(struct drm_plane *plane, 5696 struct rect *dirty_rect, int32_t x, 5697 s32 y, s32 width, s32 height, 5698 int *i, bool ffu) 5699 { 5700 WARN_ON(*i >= DC_MAX_DIRTY_RECTS); 5701 5702 dirty_rect->x = x; 5703 dirty_rect->y = y; 5704 dirty_rect->width = width; 5705 dirty_rect->height = height; 5706 5707 if (ffu) 5708 drm_dbg(plane->dev, 5709 "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n", 5710 plane->base.id, width, height); 5711 else 5712 drm_dbg(plane->dev, 5713 "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)", 5714 plane->base.id, x, y, width, height); 5715 5716 (*i)++; 5717 } 5718 5719 /** 5720 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates 5721 * 5722 * @plane: DRM plane containing dirty regions that need to be flushed to the eDP 5723 * remote fb 5724 * @old_plane_state: Old state of @plane 5725 * @new_plane_state: New state of @plane 5726 * @crtc_state: New state of CRTC connected to the @plane 5727 * @flip_addrs: DC flip tracking struct, which also tracts dirty rects 5728 * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled. 5729 * If PSR SU is enabled and damage clips are available, only the regions of the screen 5730 * that have changed will be updated. If PSR SU is not enabled, 5731 * or if damage clips are not available, the entire screen will be updated. 5732 * @dirty_regions_changed: dirty regions changed 5733 * 5734 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions 5735 * (referred to as "damage clips" in DRM nomenclature) that require updating on 5736 * the eDP remote buffer. The responsibility of specifying the dirty regions is 5737 * amdgpu_dm's. 5738 * 5739 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the 5740 * plane with regions that require flushing to the eDP remote buffer. In 5741 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) - 5742 * implicitly provide damage clips without any client support via the plane 5743 * bounds. 5744 */ 5745 static void fill_dc_dirty_rects(struct drm_plane *plane, 5746 struct drm_plane_state *old_plane_state, 5747 struct drm_plane_state *new_plane_state, 5748 struct drm_crtc_state *crtc_state, 5749 struct dc_flip_addrs *flip_addrs, 5750 bool is_psr_su, 5751 bool *dirty_regions_changed) 5752 { 5753 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 5754 struct rect *dirty_rects = flip_addrs->dirty_rects; 5755 u32 num_clips; 5756 struct drm_mode_rect *clips; 5757 bool bb_changed; 5758 bool fb_changed; 5759 u32 i = 0; 5760 *dirty_regions_changed = false; 5761 5762 /* 5763 * Cursor plane has it's own dirty rect update interface. See 5764 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data 5765 */ 5766 if (plane->type == DRM_PLANE_TYPE_CURSOR) 5767 return; 5768 5769 if (new_plane_state->rotation != DRM_MODE_ROTATE_0) 5770 goto ffu; 5771 5772 num_clips = drm_plane_get_damage_clips_count(new_plane_state); 5773 clips = drm_plane_get_damage_clips(new_plane_state); 5774 5775 if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 && 5776 is_psr_su))) 5777 goto ffu; 5778 5779 if (!dm_crtc_state->mpo_requested) { 5780 if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS) 5781 goto ffu; 5782 5783 for (; flip_addrs->dirty_rect_count < num_clips; clips++) 5784 fill_dc_dirty_rect(new_plane_state->plane, 5785 &dirty_rects[flip_addrs->dirty_rect_count], 5786 clips->x1, clips->y1, 5787 clips->x2 - clips->x1, clips->y2 - clips->y1, 5788 &flip_addrs->dirty_rect_count, 5789 false); 5790 return; 5791 } 5792 5793 /* 5794 * MPO is requested. Add entire plane bounding box to dirty rects if 5795 * flipped to or damaged. 5796 * 5797 * If plane is moved or resized, also add old bounding box to dirty 5798 * rects. 5799 */ 5800 fb_changed = old_plane_state->fb->base.id != 5801 new_plane_state->fb->base.id; 5802 bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x || 5803 old_plane_state->crtc_y != new_plane_state->crtc_y || 5804 old_plane_state->crtc_w != new_plane_state->crtc_w || 5805 old_plane_state->crtc_h != new_plane_state->crtc_h); 5806 5807 drm_dbg(plane->dev, 5808 "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n", 5809 new_plane_state->plane->base.id, 5810 bb_changed, fb_changed, num_clips); 5811 5812 *dirty_regions_changed = bb_changed; 5813 5814 if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS) 5815 goto ffu; 5816 5817 if (bb_changed) { 5818 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5819 new_plane_state->crtc_x, 5820 new_plane_state->crtc_y, 5821 new_plane_state->crtc_w, 5822 new_plane_state->crtc_h, &i, false); 5823 5824 /* Add old plane bounding-box if plane is moved or resized */ 5825 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5826 old_plane_state->crtc_x, 5827 old_plane_state->crtc_y, 5828 old_plane_state->crtc_w, 5829 old_plane_state->crtc_h, &i, false); 5830 } 5831 5832 if (num_clips) { 5833 for (; i < num_clips; clips++) 5834 fill_dc_dirty_rect(new_plane_state->plane, 5835 &dirty_rects[i], clips->x1, 5836 clips->y1, clips->x2 - clips->x1, 5837 clips->y2 - clips->y1, &i, false); 5838 } else if (fb_changed && !bb_changed) { 5839 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5840 new_plane_state->crtc_x, 5841 new_plane_state->crtc_y, 5842 new_plane_state->crtc_w, 5843 new_plane_state->crtc_h, &i, false); 5844 } 5845 5846 flip_addrs->dirty_rect_count = i; 5847 return; 5848 5849 ffu: 5850 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0, 5851 dm_crtc_state->base.mode.crtc_hdisplay, 5852 dm_crtc_state->base.mode.crtc_vdisplay, 5853 &flip_addrs->dirty_rect_count, true); 5854 } 5855 5856 static void update_stream_scaling_settings(const struct drm_display_mode *mode, 5857 const struct dm_connector_state *dm_state, 5858 struct dc_stream_state *stream) 5859 { 5860 enum amdgpu_rmx_type rmx_type; 5861 5862 struct rect src = { 0 }; /* viewport in composition space*/ 5863 struct rect dst = { 0 }; /* stream addressable area */ 5864 5865 /* no mode. nothing to be done */ 5866 if (!mode) 5867 return; 5868 5869 /* Full screen scaling by default */ 5870 src.width = mode->hdisplay; 5871 src.height = mode->vdisplay; 5872 dst.width = stream->timing.h_addressable; 5873 dst.height = stream->timing.v_addressable; 5874 5875 if (dm_state) { 5876 rmx_type = dm_state->scaling; 5877 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { 5878 if (src.width * dst.height < 5879 src.height * dst.width) { 5880 /* height needs less upscaling/more downscaling */ 5881 dst.width = src.width * 5882 dst.height / src.height; 5883 } else { 5884 /* width needs less upscaling/more downscaling */ 5885 dst.height = src.height * 5886 dst.width / src.width; 5887 } 5888 } else if (rmx_type == RMX_CENTER) { 5889 dst = src; 5890 } 5891 5892 dst.x = (stream->timing.h_addressable - dst.width) / 2; 5893 dst.y = (stream->timing.v_addressable - dst.height) / 2; 5894 5895 if (dm_state->underscan_enable) { 5896 dst.x += dm_state->underscan_hborder / 2; 5897 dst.y += dm_state->underscan_vborder / 2; 5898 dst.width -= dm_state->underscan_hborder; 5899 dst.height -= dm_state->underscan_vborder; 5900 } 5901 } 5902 5903 stream->src = src; 5904 stream->dst = dst; 5905 5906 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n", 5907 dst.x, dst.y, dst.width, dst.height); 5908 5909 } 5910 5911 static enum dc_color_depth 5912 convert_color_depth_from_display_info(const struct drm_connector *connector, 5913 bool is_y420, int requested_bpc) 5914 { 5915 u8 bpc; 5916 5917 if (is_y420) { 5918 bpc = 8; 5919 5920 /* Cap display bpc based on HDMI 2.0 HF-VSDB */ 5921 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) 5922 bpc = 16; 5923 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) 5924 bpc = 12; 5925 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) 5926 bpc = 10; 5927 } else { 5928 bpc = (uint8_t)connector->display_info.bpc; 5929 /* Assume 8 bpc by default if no bpc is specified. */ 5930 bpc = bpc ? bpc : 8; 5931 } 5932 5933 if (requested_bpc > 0) { 5934 /* 5935 * Cap display bpc based on the user requested value. 5936 * 5937 * The value for state->max_bpc may not correctly updated 5938 * depending on when the connector gets added to the state 5939 * or if this was called outside of atomic check, so it 5940 * can't be used directly. 5941 */ 5942 bpc = min_t(u8, bpc, requested_bpc); 5943 5944 /* Round down to the nearest even number. */ 5945 bpc = bpc - (bpc & 1); 5946 } 5947 5948 switch (bpc) { 5949 case 0: 5950 /* 5951 * Temporary Work around, DRM doesn't parse color depth for 5952 * EDID revision before 1.4 5953 * TODO: Fix edid parsing 5954 */ 5955 return COLOR_DEPTH_888; 5956 case 6: 5957 return COLOR_DEPTH_666; 5958 case 8: 5959 return COLOR_DEPTH_888; 5960 case 10: 5961 return COLOR_DEPTH_101010; 5962 case 12: 5963 return COLOR_DEPTH_121212; 5964 case 14: 5965 return COLOR_DEPTH_141414; 5966 case 16: 5967 return COLOR_DEPTH_161616; 5968 default: 5969 return COLOR_DEPTH_UNDEFINED; 5970 } 5971 } 5972 5973 static enum dc_aspect_ratio 5974 get_aspect_ratio(const struct drm_display_mode *mode_in) 5975 { 5976 /* 1-1 mapping, since both enums follow the HDMI spec. */ 5977 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio; 5978 } 5979 5980 static enum dc_color_space 5981 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing, 5982 const struct drm_connector_state *connector_state) 5983 { 5984 enum dc_color_space color_space = COLOR_SPACE_SRGB; 5985 5986 switch (connector_state->colorspace) { 5987 case DRM_MODE_COLORIMETRY_BT601_YCC: 5988 if (dc_crtc_timing->flags.Y_ONLY) 5989 color_space = COLOR_SPACE_YCBCR601_LIMITED; 5990 else 5991 color_space = COLOR_SPACE_YCBCR601; 5992 break; 5993 case DRM_MODE_COLORIMETRY_BT709_YCC: 5994 if (dc_crtc_timing->flags.Y_ONLY) 5995 color_space = COLOR_SPACE_YCBCR709_LIMITED; 5996 else 5997 color_space = COLOR_SPACE_YCBCR709; 5998 break; 5999 case DRM_MODE_COLORIMETRY_OPRGB: 6000 color_space = COLOR_SPACE_ADOBERGB; 6001 break; 6002 case DRM_MODE_COLORIMETRY_BT2020_RGB: 6003 case DRM_MODE_COLORIMETRY_BT2020_YCC: 6004 if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) 6005 color_space = COLOR_SPACE_2020_RGB_FULLRANGE; 6006 else 6007 color_space = COLOR_SPACE_2020_YCBCR; 6008 break; 6009 case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601 6010 default: 6011 if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) { 6012 color_space = COLOR_SPACE_SRGB; 6013 /* 6014 * 27030khz is the separation point between HDTV and SDTV 6015 * according to HDMI spec, we use YCbCr709 and YCbCr601 6016 * respectively 6017 */ 6018 } else if (dc_crtc_timing->pix_clk_100hz > 270300) { 6019 if (dc_crtc_timing->flags.Y_ONLY) 6020 color_space = 6021 COLOR_SPACE_YCBCR709_LIMITED; 6022 else 6023 color_space = COLOR_SPACE_YCBCR709; 6024 } else { 6025 if (dc_crtc_timing->flags.Y_ONLY) 6026 color_space = 6027 COLOR_SPACE_YCBCR601_LIMITED; 6028 else 6029 color_space = COLOR_SPACE_YCBCR601; 6030 } 6031 break; 6032 } 6033 6034 return color_space; 6035 } 6036 6037 static enum display_content_type 6038 get_output_content_type(const struct drm_connector_state *connector_state) 6039 { 6040 switch (connector_state->content_type) { 6041 default: 6042 case DRM_MODE_CONTENT_TYPE_NO_DATA: 6043 return DISPLAY_CONTENT_TYPE_NO_DATA; 6044 case DRM_MODE_CONTENT_TYPE_GRAPHICS: 6045 return DISPLAY_CONTENT_TYPE_GRAPHICS; 6046 case DRM_MODE_CONTENT_TYPE_PHOTO: 6047 return DISPLAY_CONTENT_TYPE_PHOTO; 6048 case DRM_MODE_CONTENT_TYPE_CINEMA: 6049 return DISPLAY_CONTENT_TYPE_CINEMA; 6050 case DRM_MODE_CONTENT_TYPE_GAME: 6051 return DISPLAY_CONTENT_TYPE_GAME; 6052 } 6053 } 6054 6055 static bool adjust_colour_depth_from_display_info( 6056 struct dc_crtc_timing *timing_out, 6057 const struct drm_display_info *info) 6058 { 6059 enum dc_color_depth depth = timing_out->display_color_depth; 6060 int normalized_clk; 6061 6062 do { 6063 normalized_clk = timing_out->pix_clk_100hz / 10; 6064 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ 6065 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) 6066 normalized_clk /= 2; 6067 /* Adjusting pix clock following on HDMI spec based on colour depth */ 6068 switch (depth) { 6069 case COLOR_DEPTH_888: 6070 break; 6071 case COLOR_DEPTH_101010: 6072 normalized_clk = (normalized_clk * 30) / 24; 6073 break; 6074 case COLOR_DEPTH_121212: 6075 normalized_clk = (normalized_clk * 36) / 24; 6076 break; 6077 case COLOR_DEPTH_161616: 6078 normalized_clk = (normalized_clk * 48) / 24; 6079 break; 6080 default: 6081 /* The above depths are the only ones valid for HDMI. */ 6082 return false; 6083 } 6084 if (normalized_clk <= info->max_tmds_clock) { 6085 timing_out->display_color_depth = depth; 6086 return true; 6087 } 6088 } while (--depth > COLOR_DEPTH_666); 6089 return false; 6090 } 6091 6092 static void fill_stream_properties_from_drm_display_mode( 6093 struct dc_stream_state *stream, 6094 const struct drm_display_mode *mode_in, 6095 const struct drm_connector *connector, 6096 const struct drm_connector_state *connector_state, 6097 const struct dc_stream_state *old_stream, 6098 int requested_bpc) 6099 { 6100 struct dc_crtc_timing *timing_out = &stream->timing; 6101 const struct drm_display_info *info = &connector->display_info; 6102 struct amdgpu_dm_connector *aconnector = NULL; 6103 struct hdmi_vendor_infoframe hv_frame; 6104 struct hdmi_avi_infoframe avi_frame; 6105 6106 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 6107 aconnector = to_amdgpu_dm_connector(connector); 6108 6109 memset(&hv_frame, 0, sizeof(hv_frame)); 6110 memset(&avi_frame, 0, sizeof(avi_frame)); 6111 6112 timing_out->h_border_left = 0; 6113 timing_out->h_border_right = 0; 6114 timing_out->v_border_top = 0; 6115 timing_out->v_border_bottom = 0; 6116 /* TODO: un-hardcode */ 6117 if (drm_mode_is_420_only(info, mode_in) 6118 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6119 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6120 else if (drm_mode_is_420_also(info, mode_in) 6121 && aconnector 6122 && aconnector->force_yuv420_output) 6123 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6124 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) 6125 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6126 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; 6127 else 6128 timing_out->pixel_encoding = PIXEL_ENCODING_RGB; 6129 6130 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; 6131 timing_out->display_color_depth = convert_color_depth_from_display_info( 6132 connector, 6133 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420), 6134 requested_bpc); 6135 timing_out->scan_type = SCANNING_TYPE_NODATA; 6136 timing_out->hdmi_vic = 0; 6137 6138 if (old_stream) { 6139 timing_out->vic = old_stream->timing.vic; 6140 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; 6141 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; 6142 } else { 6143 timing_out->vic = drm_match_cea_mode(mode_in); 6144 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) 6145 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; 6146 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) 6147 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; 6148 } 6149 6150 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 6151 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); 6152 timing_out->vic = avi_frame.video_code; 6153 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); 6154 timing_out->hdmi_vic = hv_frame.vic; 6155 } 6156 6157 if (aconnector && is_freesync_video_mode(mode_in, aconnector)) { 6158 timing_out->h_addressable = mode_in->hdisplay; 6159 timing_out->h_total = mode_in->htotal; 6160 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; 6161 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; 6162 timing_out->v_total = mode_in->vtotal; 6163 timing_out->v_addressable = mode_in->vdisplay; 6164 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; 6165 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; 6166 timing_out->pix_clk_100hz = mode_in->clock * 10; 6167 } else { 6168 timing_out->h_addressable = mode_in->crtc_hdisplay; 6169 timing_out->h_total = mode_in->crtc_htotal; 6170 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; 6171 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; 6172 timing_out->v_total = mode_in->crtc_vtotal; 6173 timing_out->v_addressable = mode_in->crtc_vdisplay; 6174 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 6175 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; 6176 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; 6177 } 6178 6179 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 6180 6181 stream->out_transfer_func.type = TF_TYPE_PREDEFINED; 6182 stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB; 6183 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 6184 if (!adjust_colour_depth_from_display_info(timing_out, info) && 6185 drm_mode_is_420_also(info, mode_in) && 6186 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { 6187 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6188 adjust_colour_depth_from_display_info(timing_out, info); 6189 } 6190 } 6191 6192 stream->output_color_space = get_output_color_space(timing_out, connector_state); 6193 stream->content_type = get_output_content_type(connector_state); 6194 } 6195 6196 static void fill_audio_info(struct audio_info *audio_info, 6197 const struct drm_connector *drm_connector, 6198 const struct dc_sink *dc_sink) 6199 { 6200 int i = 0; 6201 int cea_revision = 0; 6202 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; 6203 6204 audio_info->manufacture_id = edid_caps->manufacturer_id; 6205 audio_info->product_id = edid_caps->product_id; 6206 6207 cea_revision = drm_connector->display_info.cea_rev; 6208 6209 strscpy(audio_info->display_name, 6210 edid_caps->display_name, 6211 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 6212 6213 if (cea_revision >= 3) { 6214 audio_info->mode_count = edid_caps->audio_mode_count; 6215 6216 for (i = 0; i < audio_info->mode_count; ++i) { 6217 audio_info->modes[i].format_code = 6218 (enum audio_format_code) 6219 (edid_caps->audio_modes[i].format_code); 6220 audio_info->modes[i].channel_count = 6221 edid_caps->audio_modes[i].channel_count; 6222 audio_info->modes[i].sample_rates.all = 6223 edid_caps->audio_modes[i].sample_rate; 6224 audio_info->modes[i].sample_size = 6225 edid_caps->audio_modes[i].sample_size; 6226 } 6227 } 6228 6229 audio_info->flags.all = edid_caps->speaker_flags; 6230 6231 /* TODO: We only check for the progressive mode, check for interlace mode too */ 6232 if (drm_connector->latency_present[0]) { 6233 audio_info->video_latency = drm_connector->video_latency[0]; 6234 audio_info->audio_latency = drm_connector->audio_latency[0]; 6235 } 6236 6237 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ 6238 6239 } 6240 6241 static void 6242 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, 6243 struct drm_display_mode *dst_mode) 6244 { 6245 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; 6246 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; 6247 dst_mode->crtc_clock = src_mode->crtc_clock; 6248 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; 6249 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; 6250 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; 6251 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; 6252 dst_mode->crtc_htotal = src_mode->crtc_htotal; 6253 dst_mode->crtc_hskew = src_mode->crtc_hskew; 6254 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; 6255 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end; 6256 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; 6257 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; 6258 dst_mode->crtc_vtotal = src_mode->crtc_vtotal; 6259 } 6260 6261 static void 6262 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, 6263 const struct drm_display_mode *native_mode, 6264 bool scale_enabled) 6265 { 6266 if (scale_enabled) { 6267 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 6268 } else if (native_mode->clock == drm_mode->clock && 6269 native_mode->htotal == drm_mode->htotal && 6270 native_mode->vtotal == drm_mode->vtotal) { 6271 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 6272 } else { 6273 /* no scaling nor amdgpu inserted, no need to patch */ 6274 } 6275 } 6276 6277 static struct dc_sink * 6278 create_fake_sink(struct dc_link *link) 6279 { 6280 struct dc_sink_init_data sink_init_data = { 0 }; 6281 struct dc_sink *sink = NULL; 6282 6283 sink_init_data.link = link; 6284 sink_init_data.sink_signal = link->connector_signal; 6285 6286 sink = dc_sink_create(&sink_init_data); 6287 if (!sink) { 6288 DRM_ERROR("Failed to create sink!\n"); 6289 return NULL; 6290 } 6291 sink->sink_signal = SIGNAL_TYPE_VIRTUAL; 6292 6293 return sink; 6294 } 6295 6296 static void set_multisync_trigger_params( 6297 struct dc_stream_state *stream) 6298 { 6299 struct dc_stream_state *master = NULL; 6300 6301 if (stream->triggered_crtc_reset.enabled) { 6302 master = stream->triggered_crtc_reset.event_source; 6303 stream->triggered_crtc_reset.event = 6304 master->timing.flags.VSYNC_POSITIVE_POLARITY ? 6305 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING; 6306 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL; 6307 } 6308 } 6309 6310 static void set_master_stream(struct dc_stream_state *stream_set[], 6311 int stream_count) 6312 { 6313 int j, highest_rfr = 0, master_stream = 0; 6314 6315 for (j = 0; j < stream_count; j++) { 6316 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) { 6317 int refresh_rate = 0; 6318 6319 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/ 6320 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total); 6321 if (refresh_rate > highest_rfr) { 6322 highest_rfr = refresh_rate; 6323 master_stream = j; 6324 } 6325 } 6326 } 6327 for (j = 0; j < stream_count; j++) { 6328 if (stream_set[j]) 6329 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream]; 6330 } 6331 } 6332 6333 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context) 6334 { 6335 int i = 0; 6336 struct dc_stream_state *stream; 6337 6338 if (context->stream_count < 2) 6339 return; 6340 for (i = 0; i < context->stream_count ; i++) { 6341 if (!context->streams[i]) 6342 continue; 6343 /* 6344 * TODO: add a function to read AMD VSDB bits and set 6345 * crtc_sync_master.multi_sync_enabled flag 6346 * For now it's set to false 6347 */ 6348 } 6349 6350 set_master_stream(context->streams, context->stream_count); 6351 6352 for (i = 0; i < context->stream_count ; i++) { 6353 stream = context->streams[i]; 6354 6355 if (!stream) 6356 continue; 6357 6358 set_multisync_trigger_params(stream); 6359 } 6360 } 6361 6362 /** 6363 * DOC: FreeSync Video 6364 * 6365 * When a userspace application wants to play a video, the content follows a 6366 * standard format definition that usually specifies the FPS for that format. 6367 * The below list illustrates some video format and the expected FPS, 6368 * respectively: 6369 * 6370 * - TV/NTSC (23.976 FPS) 6371 * - Cinema (24 FPS) 6372 * - TV/PAL (25 FPS) 6373 * - TV/NTSC (29.97 FPS) 6374 * - TV/NTSC (30 FPS) 6375 * - Cinema HFR (48 FPS) 6376 * - TV/PAL (50 FPS) 6377 * - Commonly used (60 FPS) 6378 * - Multiples of 24 (48,72,96 FPS) 6379 * 6380 * The list of standards video format is not huge and can be added to the 6381 * connector modeset list beforehand. With that, userspace can leverage 6382 * FreeSync to extends the front porch in order to attain the target refresh 6383 * rate. Such a switch will happen seamlessly, without screen blanking or 6384 * reprogramming of the output in any other way. If the userspace requests a 6385 * modesetting change compatible with FreeSync modes that only differ in the 6386 * refresh rate, DC will skip the full update and avoid blink during the 6387 * transition. For example, the video player can change the modesetting from 6388 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without 6389 * causing any display blink. This same concept can be applied to a mode 6390 * setting change. 6391 */ 6392 static struct drm_display_mode * 6393 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, 6394 bool use_probed_modes) 6395 { 6396 struct drm_display_mode *m, *m_pref = NULL; 6397 u16 current_refresh, highest_refresh; 6398 struct list_head *list_head = use_probed_modes ? 6399 &aconnector->base.probed_modes : 6400 &aconnector->base.modes; 6401 6402 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 6403 return NULL; 6404 6405 if (aconnector->freesync_vid_base.clock != 0) 6406 return &aconnector->freesync_vid_base; 6407 6408 /* Find the preferred mode */ 6409 list_for_each_entry(m, list_head, head) { 6410 if (m->type & DRM_MODE_TYPE_PREFERRED) { 6411 m_pref = m; 6412 break; 6413 } 6414 } 6415 6416 if (!m_pref) { 6417 /* Probably an EDID with no preferred mode. Fallback to first entry */ 6418 m_pref = list_first_entry_or_null( 6419 &aconnector->base.modes, struct drm_display_mode, head); 6420 if (!m_pref) { 6421 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); 6422 return NULL; 6423 } 6424 } 6425 6426 highest_refresh = drm_mode_vrefresh(m_pref); 6427 6428 /* 6429 * Find the mode with highest refresh rate with same resolution. 6430 * For some monitors, preferred mode is not the mode with highest 6431 * supported refresh rate. 6432 */ 6433 list_for_each_entry(m, list_head, head) { 6434 current_refresh = drm_mode_vrefresh(m); 6435 6436 if (m->hdisplay == m_pref->hdisplay && 6437 m->vdisplay == m_pref->vdisplay && 6438 highest_refresh < current_refresh) { 6439 highest_refresh = current_refresh; 6440 m_pref = m; 6441 } 6442 } 6443 6444 drm_mode_copy(&aconnector->freesync_vid_base, m_pref); 6445 return m_pref; 6446 } 6447 6448 static bool is_freesync_video_mode(const struct drm_display_mode *mode, 6449 struct amdgpu_dm_connector *aconnector) 6450 { 6451 struct drm_display_mode *high_mode; 6452 int timing_diff; 6453 6454 high_mode = get_highest_refresh_rate_mode(aconnector, false); 6455 if (!high_mode || !mode) 6456 return false; 6457 6458 timing_diff = high_mode->vtotal - mode->vtotal; 6459 6460 if (high_mode->clock == 0 || high_mode->clock != mode->clock || 6461 high_mode->hdisplay != mode->hdisplay || 6462 high_mode->vdisplay != mode->vdisplay || 6463 high_mode->hsync_start != mode->hsync_start || 6464 high_mode->hsync_end != mode->hsync_end || 6465 high_mode->htotal != mode->htotal || 6466 high_mode->hskew != mode->hskew || 6467 high_mode->vscan != mode->vscan || 6468 high_mode->vsync_start - mode->vsync_start != timing_diff || 6469 high_mode->vsync_end - mode->vsync_end != timing_diff) 6470 return false; 6471 else 6472 return true; 6473 } 6474 6475 #if defined(CONFIG_DRM_AMD_DC_FP) 6476 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, 6477 struct dc_sink *sink, struct dc_stream_state *stream, 6478 struct dsc_dec_dpcd_caps *dsc_caps) 6479 { 6480 stream->timing.flags.DSC = 0; 6481 dsc_caps->is_dsc_supported = false; 6482 6483 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || 6484 sink->sink_signal == SIGNAL_TYPE_EDP)) { 6485 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || 6486 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) 6487 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, 6488 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 6489 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, 6490 dsc_caps); 6491 } 6492 } 6493 6494 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, 6495 struct dc_sink *sink, struct dc_stream_state *stream, 6496 struct dsc_dec_dpcd_caps *dsc_caps, 6497 uint32_t max_dsc_target_bpp_limit_override) 6498 { 6499 const struct dc_link_settings *verified_link_cap = NULL; 6500 u32 link_bw_in_kbps; 6501 u32 edp_min_bpp_x16, edp_max_bpp_x16; 6502 struct dc *dc = sink->ctx->dc; 6503 struct dc_dsc_bw_range bw_range = {0}; 6504 struct dc_dsc_config dsc_cfg = {0}; 6505 struct dc_dsc_config_options dsc_options = {0}; 6506 6507 dc_dsc_get_default_config_option(dc, &dsc_options); 6508 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 6509 6510 verified_link_cap = dc_link_get_link_cap(stream->link); 6511 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); 6512 edp_min_bpp_x16 = 8 * 16; 6513 edp_max_bpp_x16 = 8 * 16; 6514 6515 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) 6516 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; 6517 6518 if (edp_max_bpp_x16 < edp_min_bpp_x16) 6519 edp_min_bpp_x16 = edp_max_bpp_x16; 6520 6521 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], 6522 dc->debug.dsc_min_slice_height_override, 6523 edp_min_bpp_x16, edp_max_bpp_x16, 6524 dsc_caps, 6525 &stream->timing, 6526 dc_link_get_highest_encoding_format(aconnector->dc_link), 6527 &bw_range)) { 6528 6529 if (bw_range.max_kbps < link_bw_in_kbps) { 6530 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 6531 dsc_caps, 6532 &dsc_options, 6533 0, 6534 &stream->timing, 6535 dc_link_get_highest_encoding_format(aconnector->dc_link), 6536 &dsc_cfg)) { 6537 stream->timing.dsc_cfg = dsc_cfg; 6538 stream->timing.flags.DSC = 1; 6539 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; 6540 } 6541 return; 6542 } 6543 } 6544 6545 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 6546 dsc_caps, 6547 &dsc_options, 6548 link_bw_in_kbps, 6549 &stream->timing, 6550 dc_link_get_highest_encoding_format(aconnector->dc_link), 6551 &dsc_cfg)) { 6552 stream->timing.dsc_cfg = dsc_cfg; 6553 stream->timing.flags.DSC = 1; 6554 } 6555 } 6556 6557 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, 6558 struct dc_sink *sink, struct dc_stream_state *stream, 6559 struct dsc_dec_dpcd_caps *dsc_caps) 6560 { 6561 struct drm_connector *drm_connector = &aconnector->base; 6562 u32 link_bandwidth_kbps; 6563 struct dc *dc = sink->ctx->dc; 6564 u32 max_supported_bw_in_kbps, timing_bw_in_kbps; 6565 u32 dsc_max_supported_bw_in_kbps; 6566 u32 max_dsc_target_bpp_limit_override = 6567 drm_connector->display_info.max_dsc_bpp; 6568 struct dc_dsc_config_options dsc_options = {0}; 6569 6570 dc_dsc_get_default_config_option(dc, &dsc_options); 6571 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 6572 6573 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, 6574 dc_link_get_link_cap(aconnector->dc_link)); 6575 6576 /* Set DSC policy according to dsc_clock_en */ 6577 dc_dsc_policy_set_enable_dsc_when_not_needed( 6578 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); 6579 6580 if (sink->sink_signal == SIGNAL_TYPE_EDP && 6581 !aconnector->dc_link->panel_config.dsc.disable_dsc_edp && 6582 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { 6583 6584 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); 6585 6586 } else if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { 6587 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { 6588 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 6589 dsc_caps, 6590 &dsc_options, 6591 link_bandwidth_kbps, 6592 &stream->timing, 6593 dc_link_get_highest_encoding_format(aconnector->dc_link), 6594 &stream->timing.dsc_cfg)) { 6595 stream->timing.flags.DSC = 1; 6596 DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n", 6597 __func__, drm_connector->name); 6598 } 6599 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { 6600 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, 6601 dc_link_get_highest_encoding_format(aconnector->dc_link)); 6602 max_supported_bw_in_kbps = link_bandwidth_kbps; 6603 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; 6604 6605 if (timing_bw_in_kbps > max_supported_bw_in_kbps && 6606 max_supported_bw_in_kbps > 0 && 6607 dsc_max_supported_bw_in_kbps > 0) 6608 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 6609 dsc_caps, 6610 &dsc_options, 6611 dsc_max_supported_bw_in_kbps, 6612 &stream->timing, 6613 dc_link_get_highest_encoding_format(aconnector->dc_link), 6614 &stream->timing.dsc_cfg)) { 6615 stream->timing.flags.DSC = 1; 6616 DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n", 6617 __func__, drm_connector->name); 6618 } 6619 } 6620 } 6621 6622 /* Overwrite the stream flag if DSC is enabled through debugfs */ 6623 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) 6624 stream->timing.flags.DSC = 1; 6625 6626 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h) 6627 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; 6628 6629 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) 6630 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; 6631 6632 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) 6633 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; 6634 } 6635 #endif 6636 6637 static struct dc_stream_state * 6638 create_stream_for_sink(struct drm_connector *connector, 6639 const struct drm_display_mode *drm_mode, 6640 const struct dm_connector_state *dm_state, 6641 const struct dc_stream_state *old_stream, 6642 int requested_bpc) 6643 { 6644 struct amdgpu_dm_connector *aconnector = NULL; 6645 struct drm_display_mode *preferred_mode = NULL; 6646 const struct drm_connector_state *con_state = &dm_state->base; 6647 struct dc_stream_state *stream = NULL; 6648 struct drm_display_mode mode; 6649 struct drm_display_mode saved_mode; 6650 struct drm_display_mode *freesync_mode = NULL; 6651 bool native_mode_found = false; 6652 bool recalculate_timing = false; 6653 bool scale = dm_state->scaling != RMX_OFF; 6654 int mode_refresh; 6655 int preferred_refresh = 0; 6656 enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; 6657 #if defined(CONFIG_DRM_AMD_DC_FP) 6658 struct dsc_dec_dpcd_caps dsc_caps; 6659 #endif 6660 struct dc_link *link = NULL; 6661 struct dc_sink *sink = NULL; 6662 6663 drm_mode_init(&mode, drm_mode); 6664 memset(&saved_mode, 0, sizeof(saved_mode)); 6665 6666 if (connector == NULL) { 6667 DRM_ERROR("connector is NULL!\n"); 6668 return stream; 6669 } 6670 6671 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) { 6672 aconnector = NULL; 6673 aconnector = to_amdgpu_dm_connector(connector); 6674 link = aconnector->dc_link; 6675 } else { 6676 struct drm_writeback_connector *wbcon = NULL; 6677 struct amdgpu_dm_wb_connector *dm_wbcon = NULL; 6678 6679 wbcon = drm_connector_to_writeback(connector); 6680 dm_wbcon = to_amdgpu_dm_wb_connector(wbcon); 6681 link = dm_wbcon->link; 6682 } 6683 6684 if (!aconnector || !aconnector->dc_sink) { 6685 sink = create_fake_sink(link); 6686 if (!sink) 6687 return stream; 6688 6689 } else { 6690 sink = aconnector->dc_sink; 6691 dc_sink_retain(sink); 6692 } 6693 6694 stream = dc_create_stream_for_sink(sink); 6695 6696 if (stream == NULL) { 6697 DRM_ERROR("Failed to create stream for sink!\n"); 6698 goto finish; 6699 } 6700 6701 /* We leave this NULL for writeback connectors */ 6702 stream->dm_stream_context = aconnector; 6703 6704 stream->timing.flags.LTE_340MCSC_SCRAMBLE = 6705 connector->display_info.hdmi.scdc.scrambling.low_rates; 6706 6707 list_for_each_entry(preferred_mode, &connector->modes, head) { 6708 /* Search for preferred mode */ 6709 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { 6710 native_mode_found = true; 6711 break; 6712 } 6713 } 6714 if (!native_mode_found) 6715 preferred_mode = list_first_entry_or_null( 6716 &connector->modes, 6717 struct drm_display_mode, 6718 head); 6719 6720 mode_refresh = drm_mode_vrefresh(&mode); 6721 6722 if (preferred_mode == NULL) { 6723 /* 6724 * This may not be an error, the use case is when we have no 6725 * usermode calls to reset and set mode upon hotplug. In this 6726 * case, we call set mode ourselves to restore the previous mode 6727 * and the modelist may not be filled in time. 6728 */ 6729 DRM_DEBUG_DRIVER("No preferred mode found\n"); 6730 } else if (aconnector) { 6731 recalculate_timing = amdgpu_freesync_vid_mode && 6732 is_freesync_video_mode(&mode, aconnector); 6733 if (recalculate_timing) { 6734 freesync_mode = get_highest_refresh_rate_mode(aconnector, false); 6735 drm_mode_copy(&saved_mode, &mode); 6736 saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio; 6737 drm_mode_copy(&mode, freesync_mode); 6738 mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio; 6739 } else { 6740 decide_crtc_timing_for_drm_display_mode( 6741 &mode, preferred_mode, scale); 6742 6743 preferred_refresh = drm_mode_vrefresh(preferred_mode); 6744 } 6745 } 6746 6747 if (recalculate_timing) 6748 drm_mode_set_crtcinfo(&saved_mode, 0); 6749 6750 /* 6751 * If scaling is enabled and refresh rate didn't change 6752 * we copy the vic and polarities of the old timings 6753 */ 6754 if (!scale || mode_refresh != preferred_refresh) 6755 fill_stream_properties_from_drm_display_mode( 6756 stream, &mode, connector, con_state, NULL, 6757 requested_bpc); 6758 else 6759 fill_stream_properties_from_drm_display_mode( 6760 stream, &mode, connector, con_state, old_stream, 6761 requested_bpc); 6762 6763 /* The rest isn't needed for writeback connectors */ 6764 if (!aconnector) 6765 goto finish; 6766 6767 if (aconnector->timing_changed) { 6768 drm_dbg(aconnector->base.dev, 6769 "overriding timing for automated test, bpc %d, changing to %d\n", 6770 stream->timing.display_color_depth, 6771 aconnector->timing_requested->display_color_depth); 6772 stream->timing = *aconnector->timing_requested; 6773 } 6774 6775 #if defined(CONFIG_DRM_AMD_DC_FP) 6776 /* SST DSC determination policy */ 6777 update_dsc_caps(aconnector, sink, stream, &dsc_caps); 6778 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) 6779 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps); 6780 #endif 6781 6782 update_stream_scaling_settings(&mode, dm_state, stream); 6783 6784 fill_audio_info( 6785 &stream->audio_info, 6786 connector, 6787 sink); 6788 6789 update_stream_signal(stream, sink); 6790 6791 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6792 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); 6793 6794 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || 6795 stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || 6796 stream->signal == SIGNAL_TYPE_EDP) { 6797 const struct dc_edid_caps *edid_caps; 6798 unsigned int disable_colorimetry = 0; 6799 6800 if (aconnector->dc_sink) { 6801 edid_caps = &aconnector->dc_sink->edid_caps; 6802 disable_colorimetry = edid_caps->panel_patch.disable_colorimetry; 6803 } 6804 6805 // 6806 // should decide stream support vsc sdp colorimetry capability 6807 // before building vsc info packet 6808 // 6809 stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && 6810 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && 6811 !disable_colorimetry; 6812 6813 if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) 6814 tf = TRANSFER_FUNC_GAMMA_22; 6815 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); 6816 aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; 6817 6818 } 6819 finish: 6820 dc_sink_release(sink); 6821 6822 return stream; 6823 } 6824 6825 static enum drm_connector_status 6826 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) 6827 { 6828 bool connected; 6829 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6830 6831 /* 6832 * Notes: 6833 * 1. This interface is NOT called in context of HPD irq. 6834 * 2. This interface *is called* in context of user-mode ioctl. Which 6835 * makes it a bad place for *any* MST-related activity. 6836 */ 6837 6838 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && 6839 !aconnector->fake_enable) 6840 connected = (aconnector->dc_sink != NULL); 6841 else 6842 connected = (aconnector->base.force == DRM_FORCE_ON || 6843 aconnector->base.force == DRM_FORCE_ON_DIGITAL); 6844 6845 update_subconnector_property(aconnector); 6846 6847 return (connected ? connector_status_connected : 6848 connector_status_disconnected); 6849 } 6850 6851 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, 6852 struct drm_connector_state *connector_state, 6853 struct drm_property *property, 6854 uint64_t val) 6855 { 6856 struct drm_device *dev = connector->dev; 6857 struct amdgpu_device *adev = drm_to_adev(dev); 6858 struct dm_connector_state *dm_old_state = 6859 to_dm_connector_state(connector->state); 6860 struct dm_connector_state *dm_new_state = 6861 to_dm_connector_state(connector_state); 6862 6863 int ret = -EINVAL; 6864 6865 if (property == dev->mode_config.scaling_mode_property) { 6866 enum amdgpu_rmx_type rmx_type; 6867 6868 switch (val) { 6869 case DRM_MODE_SCALE_CENTER: 6870 rmx_type = RMX_CENTER; 6871 break; 6872 case DRM_MODE_SCALE_ASPECT: 6873 rmx_type = RMX_ASPECT; 6874 break; 6875 case DRM_MODE_SCALE_FULLSCREEN: 6876 rmx_type = RMX_FULL; 6877 break; 6878 case DRM_MODE_SCALE_NONE: 6879 default: 6880 rmx_type = RMX_OFF; 6881 break; 6882 } 6883 6884 if (dm_old_state->scaling == rmx_type) 6885 return 0; 6886 6887 dm_new_state->scaling = rmx_type; 6888 ret = 0; 6889 } else if (property == adev->mode_info.underscan_hborder_property) { 6890 dm_new_state->underscan_hborder = val; 6891 ret = 0; 6892 } else if (property == adev->mode_info.underscan_vborder_property) { 6893 dm_new_state->underscan_vborder = val; 6894 ret = 0; 6895 } else if (property == adev->mode_info.underscan_property) { 6896 dm_new_state->underscan_enable = val; 6897 ret = 0; 6898 } 6899 6900 return ret; 6901 } 6902 6903 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, 6904 const struct drm_connector_state *state, 6905 struct drm_property *property, 6906 uint64_t *val) 6907 { 6908 struct drm_device *dev = connector->dev; 6909 struct amdgpu_device *adev = drm_to_adev(dev); 6910 struct dm_connector_state *dm_state = 6911 to_dm_connector_state(state); 6912 int ret = -EINVAL; 6913 6914 if (property == dev->mode_config.scaling_mode_property) { 6915 switch (dm_state->scaling) { 6916 case RMX_CENTER: 6917 *val = DRM_MODE_SCALE_CENTER; 6918 break; 6919 case RMX_ASPECT: 6920 *val = DRM_MODE_SCALE_ASPECT; 6921 break; 6922 case RMX_FULL: 6923 *val = DRM_MODE_SCALE_FULLSCREEN; 6924 break; 6925 case RMX_OFF: 6926 default: 6927 *val = DRM_MODE_SCALE_NONE; 6928 break; 6929 } 6930 ret = 0; 6931 } else if (property == adev->mode_info.underscan_hborder_property) { 6932 *val = dm_state->underscan_hborder; 6933 ret = 0; 6934 } else if (property == adev->mode_info.underscan_vborder_property) { 6935 *val = dm_state->underscan_vborder; 6936 ret = 0; 6937 } else if (property == adev->mode_info.underscan_property) { 6938 *val = dm_state->underscan_enable; 6939 ret = 0; 6940 } 6941 6942 return ret; 6943 } 6944 6945 /** 6946 * DOC: panel power savings 6947 * 6948 * The display manager allows you to set your desired **panel power savings** 6949 * level (between 0-4, with 0 representing off), e.g. using the following:: 6950 * 6951 * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings 6952 * 6953 * Modifying this value can have implications on color accuracy, so tread 6954 * carefully. 6955 */ 6956 6957 static ssize_t panel_power_savings_show(struct device *device, 6958 struct device_attribute *attr, 6959 char *buf) 6960 { 6961 struct drm_connector *connector = dev_get_drvdata(device); 6962 struct drm_device *dev = connector->dev; 6963 u8 val; 6964 6965 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 6966 val = to_dm_connector_state(connector->state)->abm_level == 6967 ABM_LEVEL_IMMEDIATE_DISABLE ? 0 : 6968 to_dm_connector_state(connector->state)->abm_level; 6969 drm_modeset_unlock(&dev->mode_config.connection_mutex); 6970 6971 return sysfs_emit(buf, "%u\n", val); 6972 } 6973 6974 static ssize_t panel_power_savings_store(struct device *device, 6975 struct device_attribute *attr, 6976 const char *buf, size_t count) 6977 { 6978 struct drm_connector *connector = dev_get_drvdata(device); 6979 struct drm_device *dev = connector->dev; 6980 long val; 6981 int ret; 6982 6983 ret = kstrtol(buf, 0, &val); 6984 6985 if (ret) 6986 return ret; 6987 6988 if (val < 0 || val > 4) 6989 return -EINVAL; 6990 6991 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 6992 to_dm_connector_state(connector->state)->abm_level = val ?: 6993 ABM_LEVEL_IMMEDIATE_DISABLE; 6994 drm_modeset_unlock(&dev->mode_config.connection_mutex); 6995 6996 drm_kms_helper_hotplug_event(dev); 6997 6998 return count; 6999 } 7000 7001 static DEVICE_ATTR_RW(panel_power_savings); 7002 7003 static struct attribute *amdgpu_attrs[] = { 7004 &dev_attr_panel_power_savings.attr, 7005 NULL 7006 }; 7007 7008 static const struct attribute_group amdgpu_group = { 7009 .name = "amdgpu", 7010 .attrs = amdgpu_attrs 7011 }; 7012 7013 static bool 7014 amdgpu_dm_should_create_sysfs(struct amdgpu_dm_connector *amdgpu_dm_connector) 7015 { 7016 if (amdgpu_dm_abm_level >= 0) 7017 return false; 7018 7019 if (amdgpu_dm_connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) 7020 return false; 7021 7022 /* check for OLED panels */ 7023 if (amdgpu_dm_connector->bl_idx >= 0) { 7024 struct drm_device *drm = amdgpu_dm_connector->base.dev; 7025 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; 7026 struct amdgpu_dm_backlight_caps *caps; 7027 7028 caps = &dm->backlight_caps[amdgpu_dm_connector->bl_idx]; 7029 if (caps->aux_support) 7030 return false; 7031 } 7032 7033 return true; 7034 } 7035 7036 static void amdgpu_dm_connector_unregister(struct drm_connector *connector) 7037 { 7038 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 7039 7040 if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) 7041 sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group); 7042 7043 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); 7044 } 7045 7046 static void amdgpu_dm_connector_destroy(struct drm_connector *connector) 7047 { 7048 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7049 struct amdgpu_device *adev = drm_to_adev(connector->dev); 7050 struct amdgpu_display_manager *dm = &adev->dm; 7051 7052 /* 7053 * Call only if mst_mgr was initialized before since it's not done 7054 * for all connector types. 7055 */ 7056 if (aconnector->mst_mgr.dev) 7057 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); 7058 7059 if (aconnector->bl_idx != -1) { 7060 backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]); 7061 dm->backlight_dev[aconnector->bl_idx] = NULL; 7062 } 7063 7064 if (aconnector->dc_em_sink) 7065 dc_sink_release(aconnector->dc_em_sink); 7066 aconnector->dc_em_sink = NULL; 7067 if (aconnector->dc_sink) 7068 dc_sink_release(aconnector->dc_sink); 7069 aconnector->dc_sink = NULL; 7070 7071 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 7072 drm_connector_unregister(connector); 7073 drm_connector_cleanup(connector); 7074 if (aconnector->i2c) { 7075 i2c_del_adapter(&aconnector->i2c->base); 7076 kfree(aconnector->i2c); 7077 } 7078 kfree(aconnector->dm_dp_aux.aux.name); 7079 7080 kfree(connector); 7081 } 7082 7083 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) 7084 { 7085 struct dm_connector_state *state = 7086 to_dm_connector_state(connector->state); 7087 7088 if (connector->state) 7089 __drm_atomic_helper_connector_destroy_state(connector->state); 7090 7091 kfree(state); 7092 7093 state = kzalloc(sizeof(*state), GFP_KERNEL); 7094 7095 if (state) { 7096 state->scaling = RMX_OFF; 7097 state->underscan_enable = false; 7098 state->underscan_hborder = 0; 7099 state->underscan_vborder = 0; 7100 state->base.max_requested_bpc = 8; 7101 state->vcpi_slots = 0; 7102 state->pbn = 0; 7103 7104 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 7105 if (amdgpu_dm_abm_level <= 0) 7106 state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE; 7107 else 7108 state->abm_level = amdgpu_dm_abm_level; 7109 } 7110 7111 __drm_atomic_helper_connector_reset(connector, &state->base); 7112 } 7113 } 7114 7115 struct drm_connector_state * 7116 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) 7117 { 7118 struct dm_connector_state *state = 7119 to_dm_connector_state(connector->state); 7120 7121 struct dm_connector_state *new_state = 7122 kmemdup(state, sizeof(*state), GFP_KERNEL); 7123 7124 if (!new_state) 7125 return NULL; 7126 7127 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); 7128 7129 new_state->freesync_capable = state->freesync_capable; 7130 new_state->abm_level = state->abm_level; 7131 new_state->scaling = state->scaling; 7132 new_state->underscan_enable = state->underscan_enable; 7133 new_state->underscan_hborder = state->underscan_hborder; 7134 new_state->underscan_vborder = state->underscan_vborder; 7135 new_state->vcpi_slots = state->vcpi_slots; 7136 new_state->pbn = state->pbn; 7137 return &new_state->base; 7138 } 7139 7140 static int 7141 amdgpu_dm_connector_late_register(struct drm_connector *connector) 7142 { 7143 struct amdgpu_dm_connector *amdgpu_dm_connector = 7144 to_amdgpu_dm_connector(connector); 7145 int r; 7146 7147 if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) { 7148 r = sysfs_create_group(&connector->kdev->kobj, 7149 &amdgpu_group); 7150 if (r) 7151 return r; 7152 } 7153 7154 amdgpu_dm_register_backlight_device(amdgpu_dm_connector); 7155 7156 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 7157 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 7158 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; 7159 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); 7160 if (r) 7161 return r; 7162 } 7163 7164 #if defined(CONFIG_DEBUG_FS) 7165 connector_debugfs_init(amdgpu_dm_connector); 7166 #endif 7167 7168 return 0; 7169 } 7170 7171 static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) 7172 { 7173 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7174 struct dc_link *dc_link = aconnector->dc_link; 7175 struct dc_sink *dc_em_sink = aconnector->dc_em_sink; 7176 const struct drm_edid *drm_edid; 7177 7178 drm_edid = drm_edid_read(connector); 7179 drm_edid_connector_update(connector, drm_edid); 7180 if (!drm_edid) { 7181 DRM_ERROR("No EDID found on connector: %s.\n", connector->name); 7182 return; 7183 } 7184 7185 aconnector->drm_edid = drm_edid; 7186 /* Update emulated (virtual) sink's EDID */ 7187 if (dc_em_sink && dc_link) { 7188 // FIXME: Get rid of drm_edid_raw() 7189 const struct edid *edid = drm_edid_raw(drm_edid); 7190 7191 memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); 7192 memmove(dc_em_sink->dc_edid.raw_edid, edid, 7193 (edid->extensions + 1) * EDID_LENGTH); 7194 dm_helpers_parse_edid_caps( 7195 dc_link, 7196 &dc_em_sink->dc_edid, 7197 &dc_em_sink->edid_caps); 7198 } 7199 } 7200 7201 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { 7202 .reset = amdgpu_dm_connector_funcs_reset, 7203 .detect = amdgpu_dm_connector_detect, 7204 .fill_modes = drm_helper_probe_single_connector_modes, 7205 .destroy = amdgpu_dm_connector_destroy, 7206 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 7207 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 7208 .atomic_set_property = amdgpu_dm_connector_atomic_set_property, 7209 .atomic_get_property = amdgpu_dm_connector_atomic_get_property, 7210 .late_register = amdgpu_dm_connector_late_register, 7211 .early_unregister = amdgpu_dm_connector_unregister, 7212 .force = amdgpu_dm_connector_funcs_force 7213 }; 7214 7215 static int get_modes(struct drm_connector *connector) 7216 { 7217 return amdgpu_dm_connector_get_modes(connector); 7218 } 7219 7220 static void create_eml_sink(struct amdgpu_dm_connector *aconnector) 7221 { 7222 struct drm_connector *connector = &aconnector->base; 7223 struct dc_sink_init_data init_params = { 7224 .link = aconnector->dc_link, 7225 .sink_signal = SIGNAL_TYPE_VIRTUAL 7226 }; 7227 const struct drm_edid *drm_edid; 7228 const struct edid *edid; 7229 7230 drm_edid = drm_edid_read(connector); 7231 drm_edid_connector_update(connector, drm_edid); 7232 if (!drm_edid) { 7233 DRM_ERROR("No EDID found on connector: %s.\n", connector->name); 7234 return; 7235 } 7236 7237 if (connector->display_info.is_hdmi) 7238 init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; 7239 7240 aconnector->drm_edid = drm_edid; 7241 7242 edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() 7243 aconnector->dc_em_sink = dc_link_add_remote_sink( 7244 aconnector->dc_link, 7245 (uint8_t *)edid, 7246 (edid->extensions + 1) * EDID_LENGTH, 7247 &init_params); 7248 7249 if (aconnector->base.force == DRM_FORCE_ON) { 7250 aconnector->dc_sink = aconnector->dc_link->local_sink ? 7251 aconnector->dc_link->local_sink : 7252 aconnector->dc_em_sink; 7253 if (aconnector->dc_sink) 7254 dc_sink_retain(aconnector->dc_sink); 7255 } 7256 } 7257 7258 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) 7259 { 7260 struct dc_link *link = (struct dc_link *)aconnector->dc_link; 7261 7262 /* 7263 * In case of headless boot with force on for DP managed connector 7264 * Those settings have to be != 0 to get initial modeset 7265 */ 7266 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { 7267 link->verified_link_cap.lane_count = LANE_COUNT_FOUR; 7268 link->verified_link_cap.link_rate = LINK_RATE_HIGH2; 7269 } 7270 7271 create_eml_sink(aconnector); 7272 } 7273 7274 static enum dc_status dm_validate_stream_and_context(struct dc *dc, 7275 struct dc_stream_state *stream) 7276 { 7277 enum dc_status dc_result = DC_ERROR_UNEXPECTED; 7278 struct dc_plane_state *dc_plane_state = NULL; 7279 struct dc_state *dc_state = NULL; 7280 7281 if (!stream) 7282 goto cleanup; 7283 7284 dc_plane_state = dc_create_plane_state(dc); 7285 if (!dc_plane_state) 7286 goto cleanup; 7287 7288 dc_state = dc_state_create(dc, NULL); 7289 if (!dc_state) 7290 goto cleanup; 7291 7292 /* populate stream to plane */ 7293 dc_plane_state->src_rect.height = stream->src.height; 7294 dc_plane_state->src_rect.width = stream->src.width; 7295 dc_plane_state->dst_rect.height = stream->src.height; 7296 dc_plane_state->dst_rect.width = stream->src.width; 7297 dc_plane_state->clip_rect.height = stream->src.height; 7298 dc_plane_state->clip_rect.width = stream->src.width; 7299 dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256; 7300 dc_plane_state->plane_size.surface_size.height = stream->src.height; 7301 dc_plane_state->plane_size.surface_size.width = stream->src.width; 7302 dc_plane_state->plane_size.chroma_size.height = stream->src.height; 7303 dc_plane_state->plane_size.chroma_size.width = stream->src.width; 7304 dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 7305 dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN; 7306 dc_plane_state->rotation = ROTATION_ANGLE_0; 7307 dc_plane_state->is_tiling_rotated = false; 7308 dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL; 7309 7310 dc_result = dc_validate_stream(dc, stream); 7311 if (dc_result == DC_OK) 7312 dc_result = dc_validate_plane(dc, dc_plane_state); 7313 7314 if (dc_result == DC_OK) 7315 dc_result = dc_state_add_stream(dc, dc_state, stream); 7316 7317 if (dc_result == DC_OK && !dc_state_add_plane( 7318 dc, 7319 stream, 7320 dc_plane_state, 7321 dc_state)) 7322 dc_result = DC_FAIL_ATTACH_SURFACES; 7323 7324 if (dc_result == DC_OK) 7325 dc_result = dc_validate_global_state(dc, dc_state, true); 7326 7327 cleanup: 7328 if (dc_state) 7329 dc_state_release(dc_state); 7330 7331 if (dc_plane_state) 7332 dc_plane_state_release(dc_plane_state); 7333 7334 return dc_result; 7335 } 7336 7337 struct dc_stream_state * 7338 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, 7339 const struct drm_display_mode *drm_mode, 7340 const struct dm_connector_state *dm_state, 7341 const struct dc_stream_state *old_stream) 7342 { 7343 struct drm_connector *connector = &aconnector->base; 7344 struct amdgpu_device *adev = drm_to_adev(connector->dev); 7345 struct dc_stream_state *stream; 7346 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; 7347 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8; 7348 enum dc_status dc_result = DC_OK; 7349 uint8_t bpc_limit = 6; 7350 7351 if (!dm_state) 7352 return NULL; 7353 7354 if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A || 7355 aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) 7356 bpc_limit = 8; 7357 7358 do { 7359 stream = create_stream_for_sink(connector, drm_mode, 7360 dm_state, old_stream, 7361 requested_bpc); 7362 if (stream == NULL) { 7363 DRM_ERROR("Failed to create stream for sink!\n"); 7364 break; 7365 } 7366 7367 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 7368 return stream; 7369 7370 dc_result = dc_validate_stream(adev->dm.dc, stream); 7371 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 7372 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream); 7373 7374 if (dc_result == DC_OK) 7375 dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); 7376 7377 if (dc_result != DC_OK) { 7378 DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n", 7379 drm_mode->hdisplay, 7380 drm_mode->vdisplay, 7381 drm_mode->clock, 7382 dc_pixel_encoding_to_str(stream->timing.pixel_encoding), 7383 dc_color_depth_to_str(stream->timing.display_color_depth), 7384 dc_status_to_str(dc_result)); 7385 7386 dc_stream_release(stream); 7387 stream = NULL; 7388 requested_bpc -= 2; /* lower bpc to retry validation */ 7389 } 7390 7391 } while (stream == NULL && requested_bpc >= bpc_limit); 7392 7393 if ((dc_result == DC_FAIL_ENC_VALIDATE || 7394 dc_result == DC_EXCEED_DONGLE_CAP) && 7395 !aconnector->force_yuv420_output) { 7396 DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n", 7397 __func__, __LINE__); 7398 7399 aconnector->force_yuv420_output = true; 7400 stream = create_validate_stream_for_sink(aconnector, drm_mode, 7401 dm_state, old_stream); 7402 aconnector->force_yuv420_output = false; 7403 } 7404 7405 return stream; 7406 } 7407 7408 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, 7409 struct drm_display_mode *mode) 7410 { 7411 int result = MODE_ERROR; 7412 struct dc_sink *dc_sink; 7413 /* TODO: Unhardcode stream count */ 7414 struct dc_stream_state *stream; 7415 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7416 7417 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 7418 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) 7419 return result; 7420 7421 /* 7422 * Only run this the first time mode_valid is called to initilialize 7423 * EDID mgmt 7424 */ 7425 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && 7426 !aconnector->dc_em_sink) 7427 handle_edid_mgmt(aconnector); 7428 7429 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink; 7430 7431 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL && 7432 aconnector->base.force != DRM_FORCE_ON) { 7433 DRM_ERROR("dc_sink is NULL!\n"); 7434 goto fail; 7435 } 7436 7437 drm_mode_set_crtcinfo(mode, 0); 7438 7439 stream = create_validate_stream_for_sink(aconnector, mode, 7440 to_dm_connector_state(connector->state), 7441 NULL); 7442 if (stream) { 7443 dc_stream_release(stream); 7444 result = MODE_OK; 7445 } 7446 7447 fail: 7448 /* TODO: error handling*/ 7449 return result; 7450 } 7451 7452 static int fill_hdr_info_packet(const struct drm_connector_state *state, 7453 struct dc_info_packet *out) 7454 { 7455 struct hdmi_drm_infoframe frame; 7456 unsigned char buf[30]; /* 26 + 4 */ 7457 ssize_t len; 7458 int ret, i; 7459 7460 memset(out, 0, sizeof(*out)); 7461 7462 if (!state->hdr_output_metadata) 7463 return 0; 7464 7465 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state); 7466 if (ret) 7467 return ret; 7468 7469 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf)); 7470 if (len < 0) 7471 return (int)len; 7472 7473 /* Static metadata is a fixed 26 bytes + 4 byte header. */ 7474 if (len != 30) 7475 return -EINVAL; 7476 7477 /* Prepare the infopacket for DC. */ 7478 switch (state->connector->connector_type) { 7479 case DRM_MODE_CONNECTOR_HDMIA: 7480 out->hb0 = 0x87; /* type */ 7481 out->hb1 = 0x01; /* version */ 7482 out->hb2 = 0x1A; /* length */ 7483 out->sb[0] = buf[3]; /* checksum */ 7484 i = 1; 7485 break; 7486 7487 case DRM_MODE_CONNECTOR_DisplayPort: 7488 case DRM_MODE_CONNECTOR_eDP: 7489 out->hb0 = 0x00; /* sdp id, zero */ 7490 out->hb1 = 0x87; /* type */ 7491 out->hb2 = 0x1D; /* payload len - 1 */ 7492 out->hb3 = (0x13 << 2); /* sdp version */ 7493 out->sb[0] = 0x01; /* version */ 7494 out->sb[1] = 0x1A; /* length */ 7495 i = 2; 7496 break; 7497 7498 default: 7499 return -EINVAL; 7500 } 7501 7502 memcpy(&out->sb[i], &buf[4], 26); 7503 out->valid = true; 7504 7505 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, 7506 sizeof(out->sb), false); 7507 7508 return 0; 7509 } 7510 7511 static int 7512 amdgpu_dm_connector_atomic_check(struct drm_connector *conn, 7513 struct drm_atomic_state *state) 7514 { 7515 struct drm_connector_state *new_con_state = 7516 drm_atomic_get_new_connector_state(state, conn); 7517 struct drm_connector_state *old_con_state = 7518 drm_atomic_get_old_connector_state(state, conn); 7519 struct drm_crtc *crtc = new_con_state->crtc; 7520 struct drm_crtc_state *new_crtc_state; 7521 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); 7522 int ret; 7523 7524 trace_amdgpu_dm_connector_atomic_check(new_con_state); 7525 7526 if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 7527 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr); 7528 if (ret < 0) 7529 return ret; 7530 } 7531 7532 if (!crtc) 7533 return 0; 7534 7535 if (new_con_state->colorspace != old_con_state->colorspace) { 7536 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7537 if (IS_ERR(new_crtc_state)) 7538 return PTR_ERR(new_crtc_state); 7539 7540 new_crtc_state->mode_changed = true; 7541 } 7542 7543 if (new_con_state->content_type != old_con_state->content_type) { 7544 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7545 if (IS_ERR(new_crtc_state)) 7546 return PTR_ERR(new_crtc_state); 7547 7548 new_crtc_state->mode_changed = true; 7549 } 7550 7551 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { 7552 struct dc_info_packet hdr_infopacket; 7553 7554 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); 7555 if (ret) 7556 return ret; 7557 7558 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7559 if (IS_ERR(new_crtc_state)) 7560 return PTR_ERR(new_crtc_state); 7561 7562 /* 7563 * DC considers the stream backends changed if the 7564 * static metadata changes. Forcing the modeset also 7565 * gives a simple way for userspace to switch from 7566 * 8bpc to 10bpc when setting the metadata to enter 7567 * or exit HDR. 7568 * 7569 * Changing the static metadata after it's been 7570 * set is permissible, however. So only force a 7571 * modeset if we're entering or exiting HDR. 7572 */ 7573 new_crtc_state->mode_changed = new_crtc_state->mode_changed || 7574 !old_con_state->hdr_output_metadata || 7575 !new_con_state->hdr_output_metadata; 7576 } 7577 7578 return 0; 7579 } 7580 7581 static const struct drm_connector_helper_funcs 7582 amdgpu_dm_connector_helper_funcs = { 7583 /* 7584 * If hotplugging a second bigger display in FB Con mode, bigger resolution 7585 * modes will be filtered by drm_mode_validate_size(), and those modes 7586 * are missing after user start lightdm. So we need to renew modes list. 7587 * in get_modes call back, not just return the modes count 7588 */ 7589 .get_modes = get_modes, 7590 .mode_valid = amdgpu_dm_connector_mode_valid, 7591 .atomic_check = amdgpu_dm_connector_atomic_check, 7592 }; 7593 7594 static void dm_encoder_helper_disable(struct drm_encoder *encoder) 7595 { 7596 7597 } 7598 7599 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth) 7600 { 7601 switch (display_color_depth) { 7602 case COLOR_DEPTH_666: 7603 return 6; 7604 case COLOR_DEPTH_888: 7605 return 8; 7606 case COLOR_DEPTH_101010: 7607 return 10; 7608 case COLOR_DEPTH_121212: 7609 return 12; 7610 case COLOR_DEPTH_141414: 7611 return 14; 7612 case COLOR_DEPTH_161616: 7613 return 16; 7614 default: 7615 break; 7616 } 7617 return 0; 7618 } 7619 7620 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, 7621 struct drm_crtc_state *crtc_state, 7622 struct drm_connector_state *conn_state) 7623 { 7624 struct drm_atomic_state *state = crtc_state->state; 7625 struct drm_connector *connector = conn_state->connector; 7626 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7627 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state); 7628 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; 7629 struct drm_dp_mst_topology_mgr *mst_mgr; 7630 struct drm_dp_mst_port *mst_port; 7631 struct drm_dp_mst_topology_state *mst_state; 7632 enum dc_color_depth color_depth; 7633 int clock, bpp = 0; 7634 bool is_y420 = false; 7635 7636 if (!aconnector->mst_output_port) 7637 return 0; 7638 7639 mst_port = aconnector->mst_output_port; 7640 mst_mgr = &aconnector->mst_root->mst_mgr; 7641 7642 if (!crtc_state->connectors_changed && !crtc_state->mode_changed) 7643 return 0; 7644 7645 mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr); 7646 if (IS_ERR(mst_state)) 7647 return PTR_ERR(mst_state); 7648 7649 mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link)); 7650 7651 if (!state->duplicated) { 7652 int max_bpc = conn_state->max_requested_bpc; 7653 7654 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && 7655 aconnector->force_yuv420_output; 7656 color_depth = convert_color_depth_from_display_info(connector, 7657 is_y420, 7658 max_bpc); 7659 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; 7660 clock = adjusted_mode->clock; 7661 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4); 7662 } 7663 7664 dm_new_connector_state->vcpi_slots = 7665 drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port, 7666 dm_new_connector_state->pbn); 7667 if (dm_new_connector_state->vcpi_slots < 0) { 7668 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); 7669 return dm_new_connector_state->vcpi_slots; 7670 } 7671 return 0; 7672 } 7673 7674 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { 7675 .disable = dm_encoder_helper_disable, 7676 .atomic_check = dm_encoder_helper_atomic_check 7677 }; 7678 7679 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, 7680 struct dc_state *dc_state, 7681 struct dsc_mst_fairness_vars *vars) 7682 { 7683 struct dc_stream_state *stream = NULL; 7684 struct drm_connector *connector; 7685 struct drm_connector_state *new_con_state; 7686 struct amdgpu_dm_connector *aconnector; 7687 struct dm_connector_state *dm_conn_state; 7688 int i, j, ret; 7689 int vcpi, pbn_div, pbn = 0, slot_num = 0; 7690 7691 for_each_new_connector_in_state(state, connector, new_con_state, i) { 7692 7693 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 7694 continue; 7695 7696 aconnector = to_amdgpu_dm_connector(connector); 7697 7698 if (!aconnector->mst_output_port) 7699 continue; 7700 7701 if (!new_con_state || !new_con_state->crtc) 7702 continue; 7703 7704 dm_conn_state = to_dm_connector_state(new_con_state); 7705 7706 for (j = 0; j < dc_state->stream_count; j++) { 7707 stream = dc_state->streams[j]; 7708 if (!stream) 7709 continue; 7710 7711 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector) 7712 break; 7713 7714 stream = NULL; 7715 } 7716 7717 if (!stream) 7718 continue; 7719 7720 pbn_div = dm_mst_get_pbn_divider(stream->link); 7721 /* pbn is calculated by compute_mst_dsc_configs_for_state*/ 7722 for (j = 0; j < dc_state->stream_count; j++) { 7723 if (vars[j].aconnector == aconnector) { 7724 pbn = vars[j].pbn; 7725 break; 7726 } 7727 } 7728 7729 if (j == dc_state->stream_count || pbn_div == 0) 7730 continue; 7731 7732 slot_num = DIV_ROUND_UP(pbn, pbn_div); 7733 7734 if (stream->timing.flags.DSC != 1) { 7735 dm_conn_state->pbn = pbn; 7736 dm_conn_state->vcpi_slots = slot_num; 7737 7738 ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, 7739 dm_conn_state->pbn, false); 7740 if (ret < 0) 7741 return ret; 7742 7743 continue; 7744 } 7745 7746 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true); 7747 if (vcpi < 0) 7748 return vcpi; 7749 7750 dm_conn_state->pbn = pbn; 7751 dm_conn_state->vcpi_slots = vcpi; 7752 } 7753 return 0; 7754 } 7755 7756 static int to_drm_connector_type(enum signal_type st) 7757 { 7758 switch (st) { 7759 case SIGNAL_TYPE_HDMI_TYPE_A: 7760 return DRM_MODE_CONNECTOR_HDMIA; 7761 case SIGNAL_TYPE_EDP: 7762 return DRM_MODE_CONNECTOR_eDP; 7763 case SIGNAL_TYPE_LVDS: 7764 return DRM_MODE_CONNECTOR_LVDS; 7765 case SIGNAL_TYPE_RGB: 7766 return DRM_MODE_CONNECTOR_VGA; 7767 case SIGNAL_TYPE_DISPLAY_PORT: 7768 case SIGNAL_TYPE_DISPLAY_PORT_MST: 7769 return DRM_MODE_CONNECTOR_DisplayPort; 7770 case SIGNAL_TYPE_DVI_DUAL_LINK: 7771 case SIGNAL_TYPE_DVI_SINGLE_LINK: 7772 return DRM_MODE_CONNECTOR_DVID; 7773 case SIGNAL_TYPE_VIRTUAL: 7774 return DRM_MODE_CONNECTOR_VIRTUAL; 7775 7776 default: 7777 return DRM_MODE_CONNECTOR_Unknown; 7778 } 7779 } 7780 7781 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) 7782 { 7783 struct drm_encoder *encoder; 7784 7785 /* There is only one encoder per connector */ 7786 drm_connector_for_each_possible_encoder(connector, encoder) 7787 return encoder; 7788 7789 return NULL; 7790 } 7791 7792 static void amdgpu_dm_get_native_mode(struct drm_connector *connector) 7793 { 7794 struct drm_encoder *encoder; 7795 struct amdgpu_encoder *amdgpu_encoder; 7796 7797 encoder = amdgpu_dm_connector_to_encoder(connector); 7798 7799 if (encoder == NULL) 7800 return; 7801 7802 amdgpu_encoder = to_amdgpu_encoder(encoder); 7803 7804 amdgpu_encoder->native_mode.clock = 0; 7805 7806 if (!list_empty(&connector->probed_modes)) { 7807 struct drm_display_mode *preferred_mode = NULL; 7808 7809 list_for_each_entry(preferred_mode, 7810 &connector->probed_modes, 7811 head) { 7812 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) 7813 amdgpu_encoder->native_mode = *preferred_mode; 7814 7815 break; 7816 } 7817 7818 } 7819 } 7820 7821 static struct drm_display_mode * 7822 amdgpu_dm_create_common_mode(struct drm_encoder *encoder, 7823 char *name, 7824 int hdisplay, int vdisplay) 7825 { 7826 struct drm_device *dev = encoder->dev; 7827 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7828 struct drm_display_mode *mode = NULL; 7829 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7830 7831 mode = drm_mode_duplicate(dev, native_mode); 7832 7833 if (mode == NULL) 7834 return NULL; 7835 7836 mode->hdisplay = hdisplay; 7837 mode->vdisplay = vdisplay; 7838 mode->type &= ~DRM_MODE_TYPE_PREFERRED; 7839 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); 7840 7841 return mode; 7842 7843 } 7844 7845 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, 7846 struct drm_connector *connector) 7847 { 7848 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7849 struct drm_display_mode *mode = NULL; 7850 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7851 struct amdgpu_dm_connector *amdgpu_dm_connector = 7852 to_amdgpu_dm_connector(connector); 7853 int i; 7854 int n; 7855 struct mode_size { 7856 char name[DRM_DISPLAY_MODE_LEN]; 7857 int w; 7858 int h; 7859 } common_modes[] = { 7860 { "640x480", 640, 480}, 7861 { "800x600", 800, 600}, 7862 { "1024x768", 1024, 768}, 7863 { "1280x720", 1280, 720}, 7864 { "1280x800", 1280, 800}, 7865 {"1280x1024", 1280, 1024}, 7866 { "1440x900", 1440, 900}, 7867 {"1680x1050", 1680, 1050}, 7868 {"1600x1200", 1600, 1200}, 7869 {"1920x1080", 1920, 1080}, 7870 {"1920x1200", 1920, 1200} 7871 }; 7872 7873 n = ARRAY_SIZE(common_modes); 7874 7875 for (i = 0; i < n; i++) { 7876 struct drm_display_mode *curmode = NULL; 7877 bool mode_existed = false; 7878 7879 if (common_modes[i].w > native_mode->hdisplay || 7880 common_modes[i].h > native_mode->vdisplay || 7881 (common_modes[i].w == native_mode->hdisplay && 7882 common_modes[i].h == native_mode->vdisplay)) 7883 continue; 7884 7885 list_for_each_entry(curmode, &connector->probed_modes, head) { 7886 if (common_modes[i].w == curmode->hdisplay && 7887 common_modes[i].h == curmode->vdisplay) { 7888 mode_existed = true; 7889 break; 7890 } 7891 } 7892 7893 if (mode_existed) 7894 continue; 7895 7896 mode = amdgpu_dm_create_common_mode(encoder, 7897 common_modes[i].name, common_modes[i].w, 7898 common_modes[i].h); 7899 if (!mode) 7900 continue; 7901 7902 drm_mode_probed_add(connector, mode); 7903 amdgpu_dm_connector->num_modes++; 7904 } 7905 } 7906 7907 static void amdgpu_set_panel_orientation(struct drm_connector *connector) 7908 { 7909 struct drm_encoder *encoder; 7910 struct amdgpu_encoder *amdgpu_encoder; 7911 const struct drm_display_mode *native_mode; 7912 7913 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && 7914 connector->connector_type != DRM_MODE_CONNECTOR_LVDS) 7915 return; 7916 7917 mutex_lock(&connector->dev->mode_config.mutex); 7918 amdgpu_dm_connector_get_modes(connector); 7919 mutex_unlock(&connector->dev->mode_config.mutex); 7920 7921 encoder = amdgpu_dm_connector_to_encoder(connector); 7922 if (!encoder) 7923 return; 7924 7925 amdgpu_encoder = to_amdgpu_encoder(encoder); 7926 7927 native_mode = &amdgpu_encoder->native_mode; 7928 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) 7929 return; 7930 7931 drm_connector_set_panel_orientation_with_quirk(connector, 7932 DRM_MODE_PANEL_ORIENTATION_UNKNOWN, 7933 native_mode->hdisplay, 7934 native_mode->vdisplay); 7935 } 7936 7937 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, 7938 const struct drm_edid *drm_edid) 7939 { 7940 struct amdgpu_dm_connector *amdgpu_dm_connector = 7941 to_amdgpu_dm_connector(connector); 7942 7943 if (drm_edid) { 7944 /* empty probed_modes */ 7945 INIT_LIST_HEAD(&connector->probed_modes); 7946 amdgpu_dm_connector->num_modes = 7947 drm_edid_connector_add_modes(connector); 7948 7949 /* sorting the probed modes before calling function 7950 * amdgpu_dm_get_native_mode() since EDID can have 7951 * more than one preferred mode. The modes that are 7952 * later in the probed mode list could be of higher 7953 * and preferred resolution. For example, 3840x2160 7954 * resolution in base EDID preferred timing and 4096x2160 7955 * preferred resolution in DID extension block later. 7956 */ 7957 drm_mode_sort(&connector->probed_modes); 7958 amdgpu_dm_get_native_mode(connector); 7959 7960 /* Freesync capabilities are reset by calling 7961 * drm_edid_connector_add_modes() and need to be 7962 * restored here. 7963 */ 7964 amdgpu_dm_update_freesync_caps(connector, drm_edid); 7965 } else { 7966 amdgpu_dm_connector->num_modes = 0; 7967 } 7968 } 7969 7970 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, 7971 struct drm_display_mode *mode) 7972 { 7973 struct drm_display_mode *m; 7974 7975 list_for_each_entry(m, &aconnector->base.probed_modes, head) { 7976 if (drm_mode_equal(m, mode)) 7977 return true; 7978 } 7979 7980 return false; 7981 } 7982 7983 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) 7984 { 7985 const struct drm_display_mode *m; 7986 struct drm_display_mode *new_mode; 7987 uint i; 7988 u32 new_modes_count = 0; 7989 7990 /* Standard FPS values 7991 * 7992 * 23.976 - TV/NTSC 7993 * 24 - Cinema 7994 * 25 - TV/PAL 7995 * 29.97 - TV/NTSC 7996 * 30 - TV/NTSC 7997 * 48 - Cinema HFR 7998 * 50 - TV/PAL 7999 * 60 - Commonly used 8000 * 48,72,96,120 - Multiples of 24 8001 */ 8002 static const u32 common_rates[] = { 8003 23976, 24000, 25000, 29970, 30000, 8004 48000, 50000, 60000, 72000, 96000, 120000 8005 }; 8006 8007 /* 8008 * Find mode with highest refresh rate with the same resolution 8009 * as the preferred mode. Some monitors report a preferred mode 8010 * with lower resolution than the highest refresh rate supported. 8011 */ 8012 8013 m = get_highest_refresh_rate_mode(aconnector, true); 8014 if (!m) 8015 return 0; 8016 8017 for (i = 0; i < ARRAY_SIZE(common_rates); i++) { 8018 u64 target_vtotal, target_vtotal_diff; 8019 u64 num, den; 8020 8021 if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) 8022 continue; 8023 8024 if (common_rates[i] < aconnector->min_vfreq * 1000 || 8025 common_rates[i] > aconnector->max_vfreq * 1000) 8026 continue; 8027 8028 num = (unsigned long long)m->clock * 1000 * 1000; 8029 den = common_rates[i] * (unsigned long long)m->htotal; 8030 target_vtotal = div_u64(num, den); 8031 target_vtotal_diff = target_vtotal - m->vtotal; 8032 8033 /* Check for illegal modes */ 8034 if (m->vsync_start + target_vtotal_diff < m->vdisplay || 8035 m->vsync_end + target_vtotal_diff < m->vsync_start || 8036 m->vtotal + target_vtotal_diff < m->vsync_end) 8037 continue; 8038 8039 new_mode = drm_mode_duplicate(aconnector->base.dev, m); 8040 if (!new_mode) 8041 goto out; 8042 8043 new_mode->vtotal += (u16)target_vtotal_diff; 8044 new_mode->vsync_start += (u16)target_vtotal_diff; 8045 new_mode->vsync_end += (u16)target_vtotal_diff; 8046 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; 8047 new_mode->type |= DRM_MODE_TYPE_DRIVER; 8048 8049 if (!is_duplicate_mode(aconnector, new_mode)) { 8050 drm_mode_probed_add(&aconnector->base, new_mode); 8051 new_modes_count += 1; 8052 } else 8053 drm_mode_destroy(aconnector->base.dev, new_mode); 8054 } 8055 out: 8056 return new_modes_count; 8057 } 8058 8059 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, 8060 const struct drm_edid *drm_edid) 8061 { 8062 struct amdgpu_dm_connector *amdgpu_dm_connector = 8063 to_amdgpu_dm_connector(connector); 8064 8065 if (!(amdgpu_freesync_vid_mode && drm_edid)) 8066 return; 8067 8068 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 8069 amdgpu_dm_connector->num_modes += 8070 add_fs_modes(amdgpu_dm_connector); 8071 } 8072 8073 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) 8074 { 8075 struct amdgpu_dm_connector *amdgpu_dm_connector = 8076 to_amdgpu_dm_connector(connector); 8077 struct drm_encoder *encoder; 8078 const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid; 8079 struct dc_link_settings *verified_link_cap = 8080 &amdgpu_dm_connector->dc_link->verified_link_cap; 8081 const struct dc *dc = amdgpu_dm_connector->dc_link->dc; 8082 8083 encoder = amdgpu_dm_connector_to_encoder(connector); 8084 8085 if (!drm_edid) { 8086 amdgpu_dm_connector->num_modes = 8087 drm_add_modes_noedid(connector, 640, 480); 8088 if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) 8089 amdgpu_dm_connector->num_modes += 8090 drm_add_modes_noedid(connector, 1920, 1080); 8091 } else { 8092 amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); 8093 if (encoder) 8094 amdgpu_dm_connector_add_common_modes(encoder, connector); 8095 amdgpu_dm_connector_add_freesync_modes(connector, drm_edid); 8096 } 8097 amdgpu_dm_fbc_init(connector); 8098 8099 return amdgpu_dm_connector->num_modes; 8100 } 8101 8102 static const u32 supported_colorspaces = 8103 BIT(DRM_MODE_COLORIMETRY_BT709_YCC) | 8104 BIT(DRM_MODE_COLORIMETRY_OPRGB) | 8105 BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) | 8106 BIT(DRM_MODE_COLORIMETRY_BT2020_YCC); 8107 8108 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, 8109 struct amdgpu_dm_connector *aconnector, 8110 int connector_type, 8111 struct dc_link *link, 8112 int link_index) 8113 { 8114 struct amdgpu_device *adev = drm_to_adev(dm->ddev); 8115 8116 /* 8117 * Some of the properties below require access to state, like bpc. 8118 * Allocate some default initial connector state with our reset helper. 8119 */ 8120 if (aconnector->base.funcs->reset) 8121 aconnector->base.funcs->reset(&aconnector->base); 8122 8123 aconnector->connector_id = link_index; 8124 aconnector->bl_idx = -1; 8125 aconnector->dc_link = link; 8126 aconnector->base.interlace_allowed = false; 8127 aconnector->base.doublescan_allowed = false; 8128 aconnector->base.stereo_allowed = false; 8129 aconnector->base.dpms = DRM_MODE_DPMS_OFF; 8130 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ 8131 aconnector->audio_inst = -1; 8132 aconnector->pack_sdp_v1_3 = false; 8133 aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE; 8134 memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info)); 8135 mutex_init(&aconnector->hpd_lock); 8136 mutex_init(&aconnector->handle_mst_msg_ready); 8137 8138 /* 8139 * configure support HPD hot plug connector_>polled default value is 0 8140 * which means HPD hot plug not supported 8141 */ 8142 switch (connector_type) { 8143 case DRM_MODE_CONNECTOR_HDMIA: 8144 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8145 aconnector->base.ycbcr_420_allowed = 8146 link->link_enc->features.hdmi_ycbcr420_supported ? true : false; 8147 break; 8148 case DRM_MODE_CONNECTOR_DisplayPort: 8149 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8150 link->link_enc = link_enc_cfg_get_link_enc(link); 8151 ASSERT(link->link_enc); 8152 if (link->link_enc) 8153 aconnector->base.ycbcr_420_allowed = 8154 link->link_enc->features.dp_ycbcr420_supported ? true : false; 8155 break; 8156 case DRM_MODE_CONNECTOR_DVID: 8157 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8158 break; 8159 default: 8160 break; 8161 } 8162 8163 drm_object_attach_property(&aconnector->base.base, 8164 dm->ddev->mode_config.scaling_mode_property, 8165 DRM_MODE_SCALE_NONE); 8166 8167 drm_object_attach_property(&aconnector->base.base, 8168 adev->mode_info.underscan_property, 8169 UNDERSCAN_OFF); 8170 drm_object_attach_property(&aconnector->base.base, 8171 adev->mode_info.underscan_hborder_property, 8172 0); 8173 drm_object_attach_property(&aconnector->base.base, 8174 adev->mode_info.underscan_vborder_property, 8175 0); 8176 8177 if (!aconnector->mst_root) 8178 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); 8179 8180 aconnector->base.state->max_bpc = 16; 8181 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; 8182 8183 if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { 8184 /* Content Type is currently only implemented for HDMI. */ 8185 drm_connector_attach_content_type_property(&aconnector->base); 8186 } 8187 8188 if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { 8189 if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces)) 8190 drm_connector_attach_colorspace_property(&aconnector->base); 8191 } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) || 8192 connector_type == DRM_MODE_CONNECTOR_eDP) { 8193 if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces)) 8194 drm_connector_attach_colorspace_property(&aconnector->base); 8195 } 8196 8197 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 8198 connector_type == DRM_MODE_CONNECTOR_DisplayPort || 8199 connector_type == DRM_MODE_CONNECTOR_eDP) { 8200 drm_connector_attach_hdr_output_metadata_property(&aconnector->base); 8201 8202 if (!aconnector->mst_root) 8203 drm_connector_attach_vrr_capable_property(&aconnector->base); 8204 8205 if (adev->dm.hdcp_workqueue) 8206 drm_connector_attach_content_protection_property(&aconnector->base, true); 8207 } 8208 } 8209 8210 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 8211 struct i2c_msg *msgs, int num) 8212 { 8213 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 8214 struct ddc_service *ddc_service = i2c->ddc_service; 8215 struct i2c_command cmd; 8216 int i; 8217 int result = -EIO; 8218 8219 if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) 8220 return result; 8221 8222 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); 8223 8224 if (!cmd.payloads) 8225 return result; 8226 8227 cmd.number_of_payloads = num; 8228 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 8229 cmd.speed = 100; 8230 8231 for (i = 0; i < num; i++) { 8232 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 8233 cmd.payloads[i].address = msgs[i].addr; 8234 cmd.payloads[i].length = msgs[i].len; 8235 cmd.payloads[i].data = msgs[i].buf; 8236 } 8237 8238 if (dc_submit_i2c( 8239 ddc_service->ctx->dc, 8240 ddc_service->link->link_index, 8241 &cmd)) 8242 result = num; 8243 8244 kfree(cmd.payloads); 8245 return result; 8246 } 8247 8248 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 8249 { 8250 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 8251 } 8252 8253 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 8254 .master_xfer = amdgpu_dm_i2c_xfer, 8255 .functionality = amdgpu_dm_i2c_func, 8256 }; 8257 8258 static struct amdgpu_i2c_adapter * 8259 create_i2c(struct ddc_service *ddc_service, 8260 int link_index, 8261 int *res) 8262 { 8263 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 8264 struct amdgpu_i2c_adapter *i2c; 8265 8266 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); 8267 if (!i2c) 8268 return NULL; 8269 i2c->base.owner = THIS_MODULE; 8270 i2c->base.dev.parent = &adev->pdev->dev; 8271 i2c->base.algo = &amdgpu_dm_i2c_algo; 8272 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); 8273 i2c_set_adapdata(&i2c->base, i2c); 8274 i2c->ddc_service = ddc_service; 8275 8276 return i2c; 8277 } 8278 8279 8280 /* 8281 * Note: this function assumes that dc_link_detect() was called for the 8282 * dc_link which will be represented by this aconnector. 8283 */ 8284 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 8285 struct amdgpu_dm_connector *aconnector, 8286 u32 link_index, 8287 struct amdgpu_encoder *aencoder) 8288 { 8289 int res = 0; 8290 int connector_type; 8291 struct dc *dc = dm->dc; 8292 struct dc_link *link = dc_get_link_at_index(dc, link_index); 8293 struct amdgpu_i2c_adapter *i2c; 8294 8295 /* Not needed for writeback connector */ 8296 link->priv = aconnector; 8297 8298 8299 i2c = create_i2c(link->ddc, link->link_index, &res); 8300 if (!i2c) { 8301 DRM_ERROR("Failed to create i2c adapter data\n"); 8302 return -ENOMEM; 8303 } 8304 8305 aconnector->i2c = i2c; 8306 res = i2c_add_adapter(&i2c->base); 8307 8308 if (res) { 8309 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); 8310 goto out_free; 8311 } 8312 8313 connector_type = to_drm_connector_type(link->connector_signal); 8314 8315 res = drm_connector_init_with_ddc( 8316 dm->ddev, 8317 &aconnector->base, 8318 &amdgpu_dm_connector_funcs, 8319 connector_type, 8320 &i2c->base); 8321 8322 if (res) { 8323 DRM_ERROR("connector_init failed\n"); 8324 aconnector->connector_id = -1; 8325 goto out_free; 8326 } 8327 8328 drm_connector_helper_add( 8329 &aconnector->base, 8330 &amdgpu_dm_connector_helper_funcs); 8331 8332 amdgpu_dm_connector_init_helper( 8333 dm, 8334 aconnector, 8335 connector_type, 8336 link, 8337 link_index); 8338 8339 drm_connector_attach_encoder( 8340 &aconnector->base, &aencoder->base); 8341 8342 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort 8343 || connector_type == DRM_MODE_CONNECTOR_eDP) 8344 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); 8345 8346 out_free: 8347 if (res) { 8348 kfree(i2c); 8349 aconnector->i2c = NULL; 8350 } 8351 return res; 8352 } 8353 8354 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 8355 { 8356 switch (adev->mode_info.num_crtc) { 8357 case 1: 8358 return 0x1; 8359 case 2: 8360 return 0x3; 8361 case 3: 8362 return 0x7; 8363 case 4: 8364 return 0xf; 8365 case 5: 8366 return 0x1f; 8367 case 6: 8368 default: 8369 return 0x3f; 8370 } 8371 } 8372 8373 static int amdgpu_dm_encoder_init(struct drm_device *dev, 8374 struct amdgpu_encoder *aencoder, 8375 uint32_t link_index) 8376 { 8377 struct amdgpu_device *adev = drm_to_adev(dev); 8378 8379 int res = drm_encoder_init(dev, 8380 &aencoder->base, 8381 &amdgpu_dm_encoder_funcs, 8382 DRM_MODE_ENCODER_TMDS, 8383 NULL); 8384 8385 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 8386 8387 if (!res) 8388 aencoder->encoder_id = link_index; 8389 else 8390 aencoder->encoder_id = -1; 8391 8392 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 8393 8394 return res; 8395 } 8396 8397 static void manage_dm_interrupts(struct amdgpu_device *adev, 8398 struct amdgpu_crtc *acrtc, 8399 struct dm_crtc_state *acrtc_state) 8400 { 8401 struct drm_vblank_crtc_config config = {0}; 8402 struct dc_crtc_timing *timing; 8403 int offdelay; 8404 8405 if (acrtc_state) { 8406 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < 8407 IP_VERSION(3, 5, 0) || 8408 acrtc_state->stream->link->psr_settings.psr_version < 8409 DC_PSR_VERSION_UNSUPPORTED || 8410 !(adev->flags & AMD_IS_APU)) { 8411 timing = &acrtc_state->stream->timing; 8412 8413 /* at least 2 frames */ 8414 offdelay = DIV64_U64_ROUND_UP((u64)20 * 8415 timing->v_total * 8416 timing->h_total, 8417 timing->pix_clk_100hz); 8418 8419 config.offdelay_ms = offdelay ?: 30; 8420 } else { 8421 config.disable_immediate = true; 8422 } 8423 8424 drm_crtc_vblank_on_config(&acrtc->base, 8425 &config); 8426 } else { 8427 drm_crtc_vblank_off(&acrtc->base); 8428 } 8429 } 8430 8431 static void dm_update_pflip_irq_state(struct amdgpu_device *adev, 8432 struct amdgpu_crtc *acrtc) 8433 { 8434 int irq_type = 8435 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); 8436 8437 /** 8438 * This reads the current state for the IRQ and force reapplies 8439 * the setting to hardware. 8440 */ 8441 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type); 8442 } 8443 8444 static bool 8445 is_scaling_state_different(const struct dm_connector_state *dm_state, 8446 const struct dm_connector_state *old_dm_state) 8447 { 8448 if (dm_state->scaling != old_dm_state->scaling) 8449 return true; 8450 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { 8451 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) 8452 return true; 8453 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { 8454 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) 8455 return true; 8456 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || 8457 dm_state->underscan_vborder != old_dm_state->underscan_vborder) 8458 return true; 8459 return false; 8460 } 8461 8462 static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, 8463 struct drm_crtc_state *old_crtc_state, 8464 struct drm_connector_state *new_conn_state, 8465 struct drm_connector_state *old_conn_state, 8466 const struct drm_connector *connector, 8467 struct hdcp_workqueue *hdcp_w) 8468 { 8469 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8470 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 8471 8472 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 8473 connector->index, connector->status, connector->dpms); 8474 pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 8475 old_conn_state->content_protection, new_conn_state->content_protection); 8476 8477 if (old_crtc_state) 8478 pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 8479 old_crtc_state->enable, 8480 old_crtc_state->active, 8481 old_crtc_state->mode_changed, 8482 old_crtc_state->active_changed, 8483 old_crtc_state->connectors_changed); 8484 8485 if (new_crtc_state) 8486 pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 8487 new_crtc_state->enable, 8488 new_crtc_state->active, 8489 new_crtc_state->mode_changed, 8490 new_crtc_state->active_changed, 8491 new_crtc_state->connectors_changed); 8492 8493 /* hdcp content type change */ 8494 if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && 8495 new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 8496 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8497 pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); 8498 return true; 8499 } 8500 8501 /* CP is being re enabled, ignore this */ 8502 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && 8503 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8504 if (new_crtc_state && new_crtc_state->mode_changed) { 8505 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8506 pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); 8507 return true; 8508 } 8509 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; 8510 pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); 8511 return false; 8512 } 8513 8514 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED 8515 * 8516 * Handles: UNDESIRED -> ENABLED 8517 */ 8518 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 8519 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 8520 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8521 8522 /* Stream removed and re-enabled 8523 * 8524 * Can sometimes overlap with the HPD case, 8525 * thus set update_hdcp to false to avoid 8526 * setting HDCP multiple times. 8527 * 8528 * Handles: DESIRED -> DESIRED (Special case) 8529 */ 8530 if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && 8531 new_conn_state->crtc && new_conn_state->crtc->enabled && 8532 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8533 dm_con_state->update_hdcp = false; 8534 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", 8535 __func__); 8536 return true; 8537 } 8538 8539 /* Hot-plug, headless s3, dpms 8540 * 8541 * Only start HDCP if the display is connected/enabled. 8542 * update_hdcp flag will be set to false until the next 8543 * HPD comes in. 8544 * 8545 * Handles: DESIRED -> DESIRED (Special case) 8546 */ 8547 if (dm_con_state->update_hdcp && 8548 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 8549 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 8550 dm_con_state->update_hdcp = false; 8551 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", 8552 __func__); 8553 return true; 8554 } 8555 8556 if (old_conn_state->content_protection == new_conn_state->content_protection) { 8557 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8558 if (new_crtc_state && new_crtc_state->mode_changed) { 8559 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", 8560 __func__); 8561 return true; 8562 } 8563 pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", 8564 __func__); 8565 return false; 8566 } 8567 8568 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); 8569 return false; 8570 } 8571 8572 if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { 8573 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", 8574 __func__); 8575 return true; 8576 } 8577 8578 pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); 8579 return false; 8580 } 8581 8582 static void remove_stream(struct amdgpu_device *adev, 8583 struct amdgpu_crtc *acrtc, 8584 struct dc_stream_state *stream) 8585 { 8586 /* this is the update mode case */ 8587 8588 acrtc->otg_inst = -1; 8589 acrtc->enabled = false; 8590 } 8591 8592 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 8593 { 8594 8595 assert_spin_locked(&acrtc->base.dev->event_lock); 8596 WARN_ON(acrtc->event); 8597 8598 acrtc->event = acrtc->base.state->event; 8599 8600 /* Set the flip status */ 8601 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 8602 8603 /* Mark this event as consumed */ 8604 acrtc->base.state->event = NULL; 8605 8606 drm_dbg_state(acrtc->base.dev, 8607 "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 8608 acrtc->crtc_id); 8609 } 8610 8611 static void update_freesync_state_on_stream( 8612 struct amdgpu_display_manager *dm, 8613 struct dm_crtc_state *new_crtc_state, 8614 struct dc_stream_state *new_stream, 8615 struct dc_plane_state *surface, 8616 u32 flip_timestamp_in_us) 8617 { 8618 struct mod_vrr_params vrr_params; 8619 struct dc_info_packet vrr_infopacket = {0}; 8620 struct amdgpu_device *adev = dm->adev; 8621 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 8622 unsigned long flags; 8623 bool pack_sdp_v1_3 = false; 8624 struct amdgpu_dm_connector *aconn; 8625 enum vrr_packet_type packet_type = PACKET_TYPE_VRR; 8626 8627 if (!new_stream) 8628 return; 8629 8630 /* 8631 * TODO: Determine why min/max totals and vrefresh can be 0 here. 8632 * For now it's sufficient to just guard against these conditions. 8633 */ 8634 8635 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 8636 return; 8637 8638 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8639 vrr_params = acrtc->dm_irq_params.vrr_params; 8640 8641 if (surface) { 8642 mod_freesync_handle_preflip( 8643 dm->freesync_module, 8644 surface, 8645 new_stream, 8646 flip_timestamp_in_us, 8647 &vrr_params); 8648 8649 if (adev->family < AMDGPU_FAMILY_AI && 8650 amdgpu_dm_crtc_vrr_active(new_crtc_state)) { 8651 mod_freesync_handle_v_update(dm->freesync_module, 8652 new_stream, &vrr_params); 8653 8654 /* Need to call this before the frame ends. */ 8655 dc_stream_adjust_vmin_vmax(dm->dc, 8656 new_crtc_state->stream, 8657 &vrr_params.adjust); 8658 } 8659 } 8660 8661 aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; 8662 8663 if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) { 8664 pack_sdp_v1_3 = aconn->pack_sdp_v1_3; 8665 8666 if (aconn->vsdb_info.amd_vsdb_version == 1) 8667 packet_type = PACKET_TYPE_FS_V1; 8668 else if (aconn->vsdb_info.amd_vsdb_version == 2) 8669 packet_type = PACKET_TYPE_FS_V2; 8670 else if (aconn->vsdb_info.amd_vsdb_version == 3) 8671 packet_type = PACKET_TYPE_FS_V3; 8672 8673 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, 8674 &new_stream->adaptive_sync_infopacket); 8675 } 8676 8677 mod_freesync_build_vrr_infopacket( 8678 dm->freesync_module, 8679 new_stream, 8680 &vrr_params, 8681 packet_type, 8682 TRANSFER_FUNC_UNKNOWN, 8683 &vrr_infopacket, 8684 pack_sdp_v1_3); 8685 8686 new_crtc_state->freesync_vrr_info_changed |= 8687 (memcmp(&new_crtc_state->vrr_infopacket, 8688 &vrr_infopacket, 8689 sizeof(vrr_infopacket)) != 0); 8690 8691 acrtc->dm_irq_params.vrr_params = vrr_params; 8692 new_crtc_state->vrr_infopacket = vrr_infopacket; 8693 8694 new_stream->vrr_infopacket = vrr_infopacket; 8695 new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params); 8696 8697 if (new_crtc_state->freesync_vrr_info_changed) 8698 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", 8699 new_crtc_state->base.crtc->base.id, 8700 (int)new_crtc_state->base.vrr_enabled, 8701 (int)vrr_params.state); 8702 8703 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8704 } 8705 8706 static void update_stream_irq_parameters( 8707 struct amdgpu_display_manager *dm, 8708 struct dm_crtc_state *new_crtc_state) 8709 { 8710 struct dc_stream_state *new_stream = new_crtc_state->stream; 8711 struct mod_vrr_params vrr_params; 8712 struct mod_freesync_config config = new_crtc_state->freesync_config; 8713 struct amdgpu_device *adev = dm->adev; 8714 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 8715 unsigned long flags; 8716 8717 if (!new_stream) 8718 return; 8719 8720 /* 8721 * TODO: Determine why min/max totals and vrefresh can be 0 here. 8722 * For now it's sufficient to just guard against these conditions. 8723 */ 8724 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 8725 return; 8726 8727 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8728 vrr_params = acrtc->dm_irq_params.vrr_params; 8729 8730 if (new_crtc_state->vrr_supported && 8731 config.min_refresh_in_uhz && 8732 config.max_refresh_in_uhz) { 8733 /* 8734 * if freesync compatible mode was set, config.state will be set 8735 * in atomic check 8736 */ 8737 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && 8738 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || 8739 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { 8740 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; 8741 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; 8742 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; 8743 vrr_params.state = VRR_STATE_ACTIVE_FIXED; 8744 } else { 8745 config.state = new_crtc_state->base.vrr_enabled ? 8746 VRR_STATE_ACTIVE_VARIABLE : 8747 VRR_STATE_INACTIVE; 8748 } 8749 } else { 8750 config.state = VRR_STATE_UNSUPPORTED; 8751 } 8752 8753 mod_freesync_build_vrr_params(dm->freesync_module, 8754 new_stream, 8755 &config, &vrr_params); 8756 8757 new_crtc_state->freesync_config = config; 8758 /* Copy state for access from DM IRQ handler */ 8759 acrtc->dm_irq_params.freesync_config = config; 8760 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes; 8761 acrtc->dm_irq_params.vrr_params = vrr_params; 8762 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8763 } 8764 8765 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, 8766 struct dm_crtc_state *new_state) 8767 { 8768 bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state); 8769 bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state); 8770 8771 if (!old_vrr_active && new_vrr_active) { 8772 /* Transition VRR inactive -> active: 8773 * While VRR is active, we must not disable vblank irq, as a 8774 * reenable after disable would compute bogus vblank/pflip 8775 * timestamps if it likely happened inside display front-porch. 8776 * 8777 * We also need vupdate irq for the actual core vblank handling 8778 * at end of vblank. 8779 */ 8780 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0); 8781 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); 8782 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", 8783 __func__, new_state->base.crtc->base.id); 8784 } else if (old_vrr_active && !new_vrr_active) { 8785 /* Transition VRR active -> inactive: 8786 * Allow vblank irq disable again for fixed refresh rate. 8787 */ 8788 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0); 8789 drm_crtc_vblank_put(new_state->base.crtc); 8790 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", 8791 __func__, new_state->base.crtc->base.id); 8792 } 8793 } 8794 8795 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) 8796 { 8797 struct drm_plane *plane; 8798 struct drm_plane_state *old_plane_state; 8799 int i; 8800 8801 /* 8802 * TODO: Make this per-stream so we don't issue redundant updates for 8803 * commits with multiple streams. 8804 */ 8805 for_each_old_plane_in_state(state, plane, old_plane_state, i) 8806 if (plane->type == DRM_PLANE_TYPE_CURSOR) 8807 amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state); 8808 } 8809 8810 static inline uint32_t get_mem_type(struct drm_framebuffer *fb) 8811 { 8812 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); 8813 8814 return abo->tbo.resource ? abo->tbo.resource->mem_type : 0; 8815 } 8816 8817 static void amdgpu_dm_update_cursor(struct drm_plane *plane, 8818 struct drm_plane_state *old_plane_state, 8819 struct dc_stream_update *update) 8820 { 8821 struct amdgpu_device *adev = drm_to_adev(plane->dev); 8822 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 8823 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 8824 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 8825 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 8826 uint64_t address = afb ? afb->address : 0; 8827 struct dc_cursor_position position = {0}; 8828 struct dc_cursor_attributes attributes; 8829 int ret; 8830 8831 if (!plane->state->fb && !old_plane_state->fb) 8832 return; 8833 8834 drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n", 8835 amdgpu_crtc->crtc_id, plane->state->crtc_w, 8836 plane->state->crtc_h); 8837 8838 ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); 8839 if (ret) 8840 return; 8841 8842 if (!position.enable) { 8843 /* turn off cursor */ 8844 if (crtc_state && crtc_state->stream) { 8845 dc_stream_set_cursor_position(crtc_state->stream, 8846 &position); 8847 update->cursor_position = &crtc_state->stream->cursor_position; 8848 } 8849 return; 8850 } 8851 8852 amdgpu_crtc->cursor_width = plane->state->crtc_w; 8853 amdgpu_crtc->cursor_height = plane->state->crtc_h; 8854 8855 memset(&attributes, 0, sizeof(attributes)); 8856 attributes.address.high_part = upper_32_bits(address); 8857 attributes.address.low_part = lower_32_bits(address); 8858 attributes.width = plane->state->crtc_w; 8859 attributes.height = plane->state->crtc_h; 8860 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 8861 attributes.rotation_angle = 0; 8862 attributes.attribute_flags.value = 0; 8863 8864 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM 8865 * legacy gamma setup. 8866 */ 8867 if (crtc_state->cm_is_degamma_srgb && 8868 adev->dm.dc->caps.color.dpp.gamma_corr) 8869 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; 8870 8871 if (afb) 8872 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; 8873 8874 if (crtc_state->stream) { 8875 if (!dc_stream_set_cursor_attributes(crtc_state->stream, 8876 &attributes)) 8877 DRM_ERROR("DC failed to set cursor attributes\n"); 8878 8879 update->cursor_attributes = &crtc_state->stream->cursor_attributes; 8880 8881 if (!dc_stream_set_cursor_position(crtc_state->stream, 8882 &position)) 8883 DRM_ERROR("DC failed to set cursor position\n"); 8884 8885 update->cursor_position = &crtc_state->stream->cursor_position; 8886 } 8887 } 8888 8889 static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach, 8890 const struct dm_crtc_state *acrtc_state, 8891 const u64 current_ts) 8892 { 8893 struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; 8894 struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; 8895 struct amdgpu_dm_connector *aconn = 8896 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; 8897 8898 if (acrtc_state->update_type > UPDATE_TYPE_FAST) { 8899 if (pr->config.replay_supported && !pr->replay_feature_enabled) 8900 amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn); 8901 else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED && 8902 !psr->psr_feature_enabled) 8903 if (!aconn->disallow_edp_enter_psr) 8904 amdgpu_dm_link_setup_psr(acrtc_state->stream); 8905 } 8906 8907 /* Decrement skip count when SR is enabled and we're doing fast updates. */ 8908 if (acrtc_state->update_type == UPDATE_TYPE_FAST && 8909 (psr->psr_feature_enabled || pr->config.replay_supported)) { 8910 if (aconn->sr_skip_count > 0) 8911 aconn->sr_skip_count--; 8912 8913 /* Allow SR when skip count is 0. */ 8914 acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count; 8915 8916 /* 8917 * If sink supports PSR SU/Panel Replay, there is no need to rely on 8918 * a vblank event disable request to enable PSR/RP. PSR SU/RP 8919 * can be enabled immediately once OS demonstrates an 8920 * adequate number of fast atomic commits to notify KMD 8921 * of update events. See `vblank_control_worker()`. 8922 */ 8923 if (acrtc_attach->dm_irq_params.allow_sr_entry && 8924 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 8925 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && 8926 #endif 8927 (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) { 8928 if (pr->replay_feature_enabled && !pr->replay_allow_active) 8929 amdgpu_dm_replay_enable(acrtc_state->stream, true); 8930 if (psr->psr_version >= DC_PSR_VERSION_SU_1 && 8931 !psr->psr_allow_active && !aconn->disallow_edp_enter_psr) 8932 amdgpu_dm_psr_enable(acrtc_state->stream); 8933 } 8934 } else { 8935 acrtc_attach->dm_irq_params.allow_sr_entry = false; 8936 } 8937 } 8938 8939 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 8940 struct drm_device *dev, 8941 struct amdgpu_display_manager *dm, 8942 struct drm_crtc *pcrtc, 8943 bool wait_for_vblank) 8944 { 8945 u32 i; 8946 u64 timestamp_ns = ktime_get_ns(); 8947 struct drm_plane *plane; 8948 struct drm_plane_state *old_plane_state, *new_plane_state; 8949 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 8950 struct drm_crtc_state *new_pcrtc_state = 8951 drm_atomic_get_new_crtc_state(state, pcrtc); 8952 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 8953 struct dm_crtc_state *dm_old_crtc_state = 8954 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 8955 int planes_count = 0, vpos, hpos; 8956 unsigned long flags; 8957 u32 target_vblank, last_flip_vblank; 8958 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); 8959 bool cursor_update = false; 8960 bool pflip_present = false; 8961 bool dirty_rects_changed = false; 8962 bool updated_planes_and_streams = false; 8963 struct { 8964 struct dc_surface_update surface_updates[MAX_SURFACES]; 8965 struct dc_plane_info plane_infos[MAX_SURFACES]; 8966 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 8967 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 8968 struct dc_stream_update stream_update; 8969 } *bundle; 8970 8971 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 8972 8973 if (!bundle) { 8974 drm_err(dev, "Failed to allocate update bundle\n"); 8975 goto cleanup; 8976 } 8977 8978 /* 8979 * Disable the cursor first if we're disabling all the planes. 8980 * It'll remain on the screen after the planes are re-enabled 8981 * if we don't. 8982 * 8983 * If the cursor is transitioning from native to overlay mode, the 8984 * native cursor needs to be disabled first. 8985 */ 8986 if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE && 8987 dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { 8988 struct dc_cursor_position cursor_position = {0}; 8989 8990 if (!dc_stream_set_cursor_position(acrtc_state->stream, 8991 &cursor_position)) 8992 drm_err(dev, "DC failed to disable native cursor\n"); 8993 8994 bundle->stream_update.cursor_position = 8995 &acrtc_state->stream->cursor_position; 8996 } 8997 8998 if (acrtc_state->active_planes == 0 && 8999 dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) 9000 amdgpu_dm_commit_cursors(state); 9001 9002 /* update planes when needed */ 9003 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 9004 struct drm_crtc *crtc = new_plane_state->crtc; 9005 struct drm_crtc_state *new_crtc_state; 9006 struct drm_framebuffer *fb = new_plane_state->fb; 9007 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb; 9008 bool plane_needs_flip; 9009 struct dc_plane_state *dc_plane; 9010 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 9011 9012 /* Cursor plane is handled after stream updates */ 9013 if (plane->type == DRM_PLANE_TYPE_CURSOR && 9014 acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { 9015 if ((fb && crtc == pcrtc) || 9016 (old_plane_state->fb && old_plane_state->crtc == pcrtc)) { 9017 cursor_update = true; 9018 if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) != 0) 9019 amdgpu_dm_update_cursor(plane, old_plane_state, &bundle->stream_update); 9020 } 9021 9022 continue; 9023 } 9024 9025 if (!fb || !crtc || pcrtc != crtc) 9026 continue; 9027 9028 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 9029 if (!new_crtc_state->active) 9030 continue; 9031 9032 dc_plane = dm_new_plane_state->dc_state; 9033 if (!dc_plane) 9034 continue; 9035 9036 bundle->surface_updates[planes_count].surface = dc_plane; 9037 if (new_pcrtc_state->color_mgmt_changed) { 9038 bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction; 9039 bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func; 9040 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; 9041 bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult; 9042 bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func; 9043 bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func; 9044 bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf; 9045 } 9046 9047 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, 9048 &bundle->scaling_infos[planes_count]); 9049 9050 bundle->surface_updates[planes_count].scaling_info = 9051 &bundle->scaling_infos[planes_count]; 9052 9053 plane_needs_flip = old_plane_state->fb && new_plane_state->fb; 9054 9055 pflip_present = pflip_present || plane_needs_flip; 9056 9057 if (!plane_needs_flip) { 9058 planes_count += 1; 9059 continue; 9060 } 9061 9062 fill_dc_plane_info_and_addr( 9063 dm->adev, new_plane_state, 9064 afb->tiling_flags, 9065 &bundle->plane_infos[planes_count], 9066 &bundle->flip_addrs[planes_count].address, 9067 afb->tmz_surface); 9068 9069 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n", 9070 new_plane_state->plane->index, 9071 bundle->plane_infos[planes_count].dcc.enable); 9072 9073 bundle->surface_updates[planes_count].plane_info = 9074 &bundle->plane_infos[planes_count]; 9075 9076 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled || 9077 acrtc_state->stream->link->replay_settings.replay_feature_enabled) { 9078 fill_dc_dirty_rects(plane, old_plane_state, 9079 new_plane_state, new_crtc_state, 9080 &bundle->flip_addrs[planes_count], 9081 acrtc_state->stream->link->psr_settings.psr_version == 9082 DC_PSR_VERSION_SU_1, 9083 &dirty_rects_changed); 9084 9085 /* 9086 * If the dirty regions changed, PSR-SU need to be disabled temporarily 9087 * and enabled it again after dirty regions are stable to avoid video glitch. 9088 * PSR-SU will be enabled in vblank_control_worker() if user pause the video 9089 * during the PSR-SU was disabled. 9090 */ 9091 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && 9092 acrtc_attach->dm_irq_params.allow_sr_entry && 9093 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 9094 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && 9095 #endif 9096 dirty_rects_changed) { 9097 mutex_lock(&dm->dc_lock); 9098 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns = 9099 timestamp_ns; 9100 if (acrtc_state->stream->link->psr_settings.psr_allow_active) 9101 amdgpu_dm_psr_disable(acrtc_state->stream); 9102 mutex_unlock(&dm->dc_lock); 9103 } 9104 } 9105 9106 /* 9107 * Only allow immediate flips for fast updates that don't 9108 * change memory domain, FB pitch, DCC state, rotation or 9109 * mirroring. 9110 * 9111 * dm_crtc_helper_atomic_check() only accepts async flips with 9112 * fast updates. 9113 */ 9114 if (crtc->state->async_flip && 9115 (acrtc_state->update_type != UPDATE_TYPE_FAST || 9116 get_mem_type(old_plane_state->fb) != get_mem_type(fb))) 9117 drm_warn_once(state->dev, 9118 "[PLANE:%d:%s] async flip with non-fast update\n", 9119 plane->base.id, plane->name); 9120 9121 bundle->flip_addrs[planes_count].flip_immediate = 9122 crtc->state->async_flip && 9123 acrtc_state->update_type == UPDATE_TYPE_FAST && 9124 get_mem_type(old_plane_state->fb) == get_mem_type(fb); 9125 9126 timestamp_ns = ktime_get_ns(); 9127 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); 9128 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count]; 9129 bundle->surface_updates[planes_count].surface = dc_plane; 9130 9131 if (!bundle->surface_updates[planes_count].surface) { 9132 DRM_ERROR("No surface for CRTC: id=%d\n", 9133 acrtc_attach->crtc_id); 9134 continue; 9135 } 9136 9137 if (plane == pcrtc->primary) 9138 update_freesync_state_on_stream( 9139 dm, 9140 acrtc_state, 9141 acrtc_state->stream, 9142 dc_plane, 9143 bundle->flip_addrs[planes_count].flip_timestamp_in_us); 9144 9145 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n", 9146 __func__, 9147 bundle->flip_addrs[planes_count].address.grph.addr.high_part, 9148 bundle->flip_addrs[planes_count].address.grph.addr.low_part); 9149 9150 planes_count += 1; 9151 9152 } 9153 9154 if (pflip_present) { 9155 if (!vrr_active) { 9156 /* Use old throttling in non-vrr fixed refresh rate mode 9157 * to keep flip scheduling based on target vblank counts 9158 * working in a backwards compatible way, e.g., for 9159 * clients using the GLX_OML_sync_control extension or 9160 * DRI3/Present extension with defined target_msc. 9161 */ 9162 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); 9163 } else { 9164 /* For variable refresh rate mode only: 9165 * Get vblank of last completed flip to avoid > 1 vrr 9166 * flips per video frame by use of throttling, but allow 9167 * flip programming anywhere in the possibly large 9168 * variable vrr vblank interval for fine-grained flip 9169 * timing control and more opportunity to avoid stutter 9170 * on late submission of flips. 9171 */ 9172 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9173 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; 9174 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9175 } 9176 9177 target_vblank = last_flip_vblank + wait_for_vblank; 9178 9179 /* 9180 * Wait until we're out of the vertical blank period before the one 9181 * targeted by the flip 9182 */ 9183 while ((acrtc_attach->enabled && 9184 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 9185 0, &vpos, &hpos, NULL, 9186 NULL, &pcrtc->hwmode) 9187 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 9188 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 9189 (int)(target_vblank - 9190 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { 9191 usleep_range(1000, 1100); 9192 } 9193 9194 /** 9195 * Prepare the flip event for the pageflip interrupt to handle. 9196 * 9197 * This only works in the case where we've already turned on the 9198 * appropriate hardware blocks (eg. HUBP) so in the transition case 9199 * from 0 -> n planes we have to skip a hardware generated event 9200 * and rely on sending it from software. 9201 */ 9202 if (acrtc_attach->base.state->event && 9203 acrtc_state->active_planes > 0) { 9204 drm_crtc_vblank_get(pcrtc); 9205 9206 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9207 9208 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); 9209 prepare_flip_isr(acrtc_attach); 9210 9211 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9212 } 9213 9214 if (acrtc_state->stream) { 9215 if (acrtc_state->freesync_vrr_info_changed) 9216 bundle->stream_update.vrr_infopacket = 9217 &acrtc_state->stream->vrr_infopacket; 9218 } 9219 } else if (cursor_update && acrtc_state->active_planes > 0) { 9220 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9221 if (acrtc_attach->base.state->event) { 9222 drm_crtc_vblank_get(pcrtc); 9223 acrtc_attach->event = acrtc_attach->base.state->event; 9224 acrtc_attach->base.state->event = NULL; 9225 } 9226 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9227 } 9228 9229 /* Update the planes if changed or disable if we don't have any. */ 9230 if ((planes_count || acrtc_state->active_planes == 0) && 9231 acrtc_state->stream) { 9232 /* 9233 * If PSR or idle optimizations are enabled then flush out 9234 * any pending work before hardware programming. 9235 */ 9236 if (dm->vblank_control_workqueue) 9237 flush_workqueue(dm->vblank_control_workqueue); 9238 9239 bundle->stream_update.stream = acrtc_state->stream; 9240 if (new_pcrtc_state->mode_changed) { 9241 bundle->stream_update.src = acrtc_state->stream->src; 9242 bundle->stream_update.dst = acrtc_state->stream->dst; 9243 } 9244 9245 if (new_pcrtc_state->color_mgmt_changed) { 9246 /* 9247 * TODO: This isn't fully correct since we've actually 9248 * already modified the stream in place. 9249 */ 9250 bundle->stream_update.gamut_remap = 9251 &acrtc_state->stream->gamut_remap_matrix; 9252 bundle->stream_update.output_csc_transform = 9253 &acrtc_state->stream->csc_color_matrix; 9254 bundle->stream_update.out_transfer_func = 9255 &acrtc_state->stream->out_transfer_func; 9256 bundle->stream_update.lut3d_func = 9257 (struct dc_3dlut *) acrtc_state->stream->lut3d_func; 9258 bundle->stream_update.func_shaper = 9259 (struct dc_transfer_func *) acrtc_state->stream->func_shaper; 9260 } 9261 9262 acrtc_state->stream->abm_level = acrtc_state->abm_level; 9263 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) 9264 bundle->stream_update.abm_level = &acrtc_state->abm_level; 9265 9266 mutex_lock(&dm->dc_lock); 9267 if (acrtc_state->update_type > UPDATE_TYPE_FAST) { 9268 if (acrtc_state->stream->link->replay_settings.replay_allow_active) 9269 amdgpu_dm_replay_disable(acrtc_state->stream); 9270 if (acrtc_state->stream->link->psr_settings.psr_allow_active) 9271 amdgpu_dm_psr_disable(acrtc_state->stream); 9272 } 9273 mutex_unlock(&dm->dc_lock); 9274 9275 /* 9276 * If FreeSync state on the stream has changed then we need to 9277 * re-adjust the min/max bounds now that DC doesn't handle this 9278 * as part of commit. 9279 */ 9280 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { 9281 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9282 dc_stream_adjust_vmin_vmax( 9283 dm->dc, acrtc_state->stream, 9284 &acrtc_attach->dm_irq_params.vrr_params.adjust); 9285 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9286 } 9287 mutex_lock(&dm->dc_lock); 9288 update_planes_and_stream_adapter(dm->dc, 9289 acrtc_state->update_type, 9290 planes_count, 9291 acrtc_state->stream, 9292 &bundle->stream_update, 9293 bundle->surface_updates); 9294 updated_planes_and_streams = true; 9295 9296 /** 9297 * Enable or disable the interrupts on the backend. 9298 * 9299 * Most pipes are put into power gating when unused. 9300 * 9301 * When power gating is enabled on a pipe we lose the 9302 * interrupt enablement state when power gating is disabled. 9303 * 9304 * So we need to update the IRQ control state in hardware 9305 * whenever the pipe turns on (since it could be previously 9306 * power gated) or off (since some pipes can't be power gated 9307 * on some ASICs). 9308 */ 9309 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) 9310 dm_update_pflip_irq_state(drm_to_adev(dev), 9311 acrtc_attach); 9312 9313 amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns); 9314 mutex_unlock(&dm->dc_lock); 9315 } 9316 9317 /* 9318 * Update cursor state *after* programming all the planes. 9319 * This avoids redundant programming in the case where we're going 9320 * to be disabling a single plane - those pipes are being disabled. 9321 */ 9322 if (acrtc_state->active_planes && 9323 (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) && 9324 acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) 9325 amdgpu_dm_commit_cursors(state); 9326 9327 cleanup: 9328 kfree(bundle); 9329 } 9330 9331 static void amdgpu_dm_commit_audio(struct drm_device *dev, 9332 struct drm_atomic_state *state) 9333 { 9334 struct amdgpu_device *adev = drm_to_adev(dev); 9335 struct amdgpu_dm_connector *aconnector; 9336 struct drm_connector *connector; 9337 struct drm_connector_state *old_con_state, *new_con_state; 9338 struct drm_crtc_state *new_crtc_state; 9339 struct dm_crtc_state *new_dm_crtc_state; 9340 const struct dc_stream_status *status; 9341 int i, inst; 9342 9343 /* Notify device removals. */ 9344 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9345 if (old_con_state->crtc != new_con_state->crtc) { 9346 /* CRTC changes require notification. */ 9347 goto notify; 9348 } 9349 9350 if (!new_con_state->crtc) 9351 continue; 9352 9353 new_crtc_state = drm_atomic_get_new_crtc_state( 9354 state, new_con_state->crtc); 9355 9356 if (!new_crtc_state) 9357 continue; 9358 9359 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9360 continue; 9361 9362 notify: 9363 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 9364 continue; 9365 9366 aconnector = to_amdgpu_dm_connector(connector); 9367 9368 mutex_lock(&adev->dm.audio_lock); 9369 inst = aconnector->audio_inst; 9370 aconnector->audio_inst = -1; 9371 mutex_unlock(&adev->dm.audio_lock); 9372 9373 amdgpu_dm_audio_eld_notify(adev, inst); 9374 } 9375 9376 /* Notify audio device additions. */ 9377 for_each_new_connector_in_state(state, connector, new_con_state, i) { 9378 if (!new_con_state->crtc) 9379 continue; 9380 9381 new_crtc_state = drm_atomic_get_new_crtc_state( 9382 state, new_con_state->crtc); 9383 9384 if (!new_crtc_state) 9385 continue; 9386 9387 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9388 continue; 9389 9390 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 9391 if (!new_dm_crtc_state->stream) 9392 continue; 9393 9394 status = dc_stream_get_status(new_dm_crtc_state->stream); 9395 if (!status) 9396 continue; 9397 9398 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 9399 continue; 9400 9401 aconnector = to_amdgpu_dm_connector(connector); 9402 9403 mutex_lock(&adev->dm.audio_lock); 9404 inst = status->audio_inst; 9405 aconnector->audio_inst = inst; 9406 mutex_unlock(&adev->dm.audio_lock); 9407 9408 amdgpu_dm_audio_eld_notify(adev, inst); 9409 } 9410 } 9411 9412 /* 9413 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC 9414 * @crtc_state: the DRM CRTC state 9415 * @stream_state: the DC stream state. 9416 * 9417 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring 9418 * a dc_stream_state's flags in sync with a drm_crtc_state's flags. 9419 */ 9420 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, 9421 struct dc_stream_state *stream_state) 9422 { 9423 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); 9424 } 9425 9426 static void dm_clear_writeback(struct amdgpu_display_manager *dm, 9427 struct dm_crtc_state *crtc_state) 9428 { 9429 dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0); 9430 } 9431 9432 static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, 9433 struct dc_state *dc_state) 9434 { 9435 struct drm_device *dev = state->dev; 9436 struct amdgpu_device *adev = drm_to_adev(dev); 9437 struct amdgpu_display_manager *dm = &adev->dm; 9438 struct drm_crtc *crtc; 9439 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 9440 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 9441 struct drm_connector_state *old_con_state; 9442 struct drm_connector *connector; 9443 bool mode_set_reset_required = false; 9444 u32 i; 9445 struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; 9446 bool set_backlight_level = false; 9447 9448 /* Disable writeback */ 9449 for_each_old_connector_in_state(state, connector, old_con_state, i) { 9450 struct dm_connector_state *dm_old_con_state; 9451 struct amdgpu_crtc *acrtc; 9452 9453 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 9454 continue; 9455 9456 old_crtc_state = NULL; 9457 9458 dm_old_con_state = to_dm_connector_state(old_con_state); 9459 if (!dm_old_con_state->base.crtc) 9460 continue; 9461 9462 acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc); 9463 if (acrtc) 9464 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9465 9466 if (!acrtc || !acrtc->wb_enabled) 9467 continue; 9468 9469 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9470 9471 dm_clear_writeback(dm, dm_old_crtc_state); 9472 acrtc->wb_enabled = false; 9473 } 9474 9475 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 9476 new_crtc_state, i) { 9477 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9478 9479 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9480 9481 if (old_crtc_state->active && 9482 (!new_crtc_state->active || 9483 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 9484 manage_dm_interrupts(adev, acrtc, NULL); 9485 dc_stream_release(dm_old_crtc_state->stream); 9486 } 9487 } 9488 9489 drm_atomic_helper_calc_timestamping_constants(state); 9490 9491 /* update changed items */ 9492 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 9493 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9494 9495 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9496 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9497 9498 drm_dbg_state(state->dev, 9499 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 9500 acrtc->crtc_id, 9501 new_crtc_state->enable, 9502 new_crtc_state->active, 9503 new_crtc_state->planes_changed, 9504 new_crtc_state->mode_changed, 9505 new_crtc_state->active_changed, 9506 new_crtc_state->connectors_changed); 9507 9508 /* Disable cursor if disabling crtc */ 9509 if (old_crtc_state->active && !new_crtc_state->active) { 9510 struct dc_cursor_position position; 9511 9512 memset(&position, 0, sizeof(position)); 9513 mutex_lock(&dm->dc_lock); 9514 dc_exit_ips_for_hw_access(dm->dc); 9515 dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position); 9516 mutex_unlock(&dm->dc_lock); 9517 } 9518 9519 /* Copy all transient state flags into dc state */ 9520 if (dm_new_crtc_state->stream) { 9521 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base, 9522 dm_new_crtc_state->stream); 9523 } 9524 9525 /* handles headless hotplug case, updating new_state and 9526 * aconnector as needed 9527 */ 9528 9529 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { 9530 9531 drm_dbg_atomic(dev, 9532 "Atomic commit: SET crtc id %d: [%p]\n", 9533 acrtc->crtc_id, acrtc); 9534 9535 if (!dm_new_crtc_state->stream) { 9536 /* 9537 * this could happen because of issues with 9538 * userspace notifications delivery. 9539 * In this case userspace tries to set mode on 9540 * display which is disconnected in fact. 9541 * dc_sink is NULL in this case on aconnector. 9542 * We expect reset mode will come soon. 9543 * 9544 * This can also happen when unplug is done 9545 * during resume sequence ended 9546 * 9547 * In this case, we want to pretend we still 9548 * have a sink to keep the pipe running so that 9549 * hw state is consistent with the sw state 9550 */ 9551 drm_dbg_atomic(dev, 9552 "Failed to create new stream for crtc %d\n", 9553 acrtc->base.base.id); 9554 continue; 9555 } 9556 9557 if (dm_old_crtc_state->stream) 9558 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 9559 9560 pm_runtime_get_noresume(dev->dev); 9561 9562 acrtc->enabled = true; 9563 acrtc->hw_mode = new_crtc_state->mode; 9564 crtc->hwmode = new_crtc_state->mode; 9565 mode_set_reset_required = true; 9566 set_backlight_level = true; 9567 } else if (modereset_required(new_crtc_state)) { 9568 drm_dbg_atomic(dev, 9569 "Atomic commit: RESET. crtc id %d:[%p]\n", 9570 acrtc->crtc_id, acrtc); 9571 /* i.e. reset mode */ 9572 if (dm_old_crtc_state->stream) 9573 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 9574 9575 mode_set_reset_required = true; 9576 } 9577 } /* for_each_crtc_in_state() */ 9578 9579 /* if there mode set or reset, disable eDP PSR, Replay */ 9580 if (mode_set_reset_required) { 9581 if (dm->vblank_control_workqueue) 9582 flush_workqueue(dm->vblank_control_workqueue); 9583 9584 amdgpu_dm_replay_disable_all(dm); 9585 amdgpu_dm_psr_disable_all(dm); 9586 } 9587 9588 dm_enable_per_frame_crtc_master_sync(dc_state); 9589 mutex_lock(&dm->dc_lock); 9590 dc_exit_ips_for_hw_access(dm->dc); 9591 WARN_ON(!dc_commit_streams(dm->dc, ¶ms)); 9592 9593 /* Allow idle optimization when vblank count is 0 for display off */ 9594 if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev)) 9595 dc_allow_idle_optimizations(dm->dc, true); 9596 mutex_unlock(&dm->dc_lock); 9597 9598 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 9599 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9600 9601 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9602 9603 if (dm_new_crtc_state->stream != NULL) { 9604 const struct dc_stream_status *status = 9605 dc_stream_get_status(dm_new_crtc_state->stream); 9606 9607 if (!status) 9608 status = dc_state_get_stream_status(dc_state, 9609 dm_new_crtc_state->stream); 9610 if (!status) 9611 drm_err(dev, 9612 "got no status for stream %p on acrtc%p\n", 9613 dm_new_crtc_state->stream, acrtc); 9614 else 9615 acrtc->otg_inst = status->primary_otg_inst; 9616 } 9617 } 9618 9619 /* During boot up and resume the DC layer will reset the panel brightness 9620 * to fix a flicker issue. 9621 * It will cause the dm->actual_brightness is not the current panel brightness 9622 * level. (the dm->brightness is the correct panel level) 9623 * So we set the backlight level with dm->brightness value after set mode 9624 */ 9625 if (set_backlight_level) { 9626 for (i = 0; i < dm->num_of_edps; i++) { 9627 if (dm->backlight_dev[i]) 9628 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 9629 } 9630 } 9631 } 9632 9633 static void dm_set_writeback(struct amdgpu_display_manager *dm, 9634 struct dm_crtc_state *crtc_state, 9635 struct drm_connector *connector, 9636 struct drm_connector_state *new_con_state) 9637 { 9638 struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector); 9639 struct amdgpu_device *adev = dm->adev; 9640 struct amdgpu_crtc *acrtc; 9641 struct dc_writeback_info *wb_info; 9642 struct pipe_ctx *pipe = NULL; 9643 struct amdgpu_framebuffer *afb; 9644 int i = 0; 9645 9646 wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL); 9647 if (!wb_info) { 9648 DRM_ERROR("Failed to allocate wb_info\n"); 9649 return; 9650 } 9651 9652 acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc); 9653 if (!acrtc) { 9654 DRM_ERROR("no amdgpu_crtc found\n"); 9655 kfree(wb_info); 9656 return; 9657 } 9658 9659 afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb); 9660 if (!afb) { 9661 DRM_ERROR("No amdgpu_framebuffer found\n"); 9662 kfree(wb_info); 9663 return; 9664 } 9665 9666 for (i = 0; i < MAX_PIPES; i++) { 9667 if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) { 9668 pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i]; 9669 break; 9670 } 9671 } 9672 9673 /* fill in wb_info */ 9674 wb_info->wb_enabled = true; 9675 9676 wb_info->dwb_pipe_inst = 0; 9677 wb_info->dwb_params.dwbscl_black_color = 0; 9678 wb_info->dwb_params.hdr_mult = 0x1F000; 9679 wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS; 9680 wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13; 9681 wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC; 9682 wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC; 9683 9684 /* width & height from crtc */ 9685 wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay; 9686 wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay; 9687 wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay; 9688 wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay; 9689 9690 wb_info->dwb_params.cnv_params.crop_en = false; 9691 wb_info->dwb_params.stereo_params.stereo_enabled = false; 9692 9693 wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits 9694 wb_info->dwb_params.cnv_params.out_min_pix_val = 0; 9695 wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB; 9696 wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS; 9697 9698 wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444; 9699 9700 wb_info->dwb_params.capture_rate = dwb_capture_rate_0; 9701 9702 wb_info->dwb_params.scaler_taps.h_taps = 4; 9703 wb_info->dwb_params.scaler_taps.v_taps = 4; 9704 wb_info->dwb_params.scaler_taps.h_taps_c = 2; 9705 wb_info->dwb_params.scaler_taps.v_taps_c = 2; 9706 wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING; 9707 9708 wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0]; 9709 wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1]; 9710 9711 for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) { 9712 wb_info->mcif_buf_params.luma_address[i] = afb->address; 9713 wb_info->mcif_buf_params.chroma_address[i] = 0; 9714 } 9715 9716 wb_info->mcif_buf_params.p_vmid = 1; 9717 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) { 9718 wb_info->mcif_warmup_params.start_address.quad_part = afb->address; 9719 wb_info->mcif_warmup_params.region_size = 9720 wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height; 9721 } 9722 wb_info->mcif_warmup_params.p_vmid = 1; 9723 wb_info->writeback_source_plane = pipe->plane_state; 9724 9725 dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info); 9726 9727 acrtc->wb_pending = true; 9728 acrtc->wb_conn = wb_conn; 9729 drm_writeback_queue_job(wb_conn, new_con_state); 9730 } 9731 9732 /** 9733 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. 9734 * @state: The atomic state to commit 9735 * 9736 * This will tell DC to commit the constructed DC state from atomic_check, 9737 * programming the hardware. Any failures here implies a hardware failure, since 9738 * atomic check should have filtered anything non-kosher. 9739 */ 9740 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) 9741 { 9742 struct drm_device *dev = state->dev; 9743 struct amdgpu_device *adev = drm_to_adev(dev); 9744 struct amdgpu_display_manager *dm = &adev->dm; 9745 struct dm_atomic_state *dm_state; 9746 struct dc_state *dc_state = NULL; 9747 u32 i, j; 9748 struct drm_crtc *crtc; 9749 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 9750 unsigned long flags; 9751 bool wait_for_vblank = true; 9752 struct drm_connector *connector; 9753 struct drm_connector_state *old_con_state, *new_con_state; 9754 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 9755 int crtc_disable_count = 0; 9756 9757 trace_amdgpu_dm_atomic_commit_tail_begin(state); 9758 9759 drm_atomic_helper_update_legacy_modeset_state(dev, state); 9760 drm_dp_mst_atomic_wait_for_dependencies(state); 9761 9762 dm_state = dm_atomic_get_new_state(state); 9763 if (dm_state && dm_state->context) { 9764 dc_state = dm_state->context; 9765 amdgpu_dm_commit_streams(state, dc_state); 9766 } 9767 9768 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9769 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9770 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9771 struct amdgpu_dm_connector *aconnector; 9772 9773 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 9774 continue; 9775 9776 aconnector = to_amdgpu_dm_connector(connector); 9777 9778 if (!adev->dm.hdcp_workqueue) 9779 continue; 9780 9781 pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); 9782 9783 if (!connector) 9784 continue; 9785 9786 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 9787 connector->index, connector->status, connector->dpms); 9788 pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 9789 old_con_state->content_protection, new_con_state->content_protection); 9790 9791 if (aconnector->dc_sink) { 9792 if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && 9793 aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { 9794 pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", 9795 aconnector->dc_sink->edid_caps.display_name); 9796 } 9797 } 9798 9799 new_crtc_state = NULL; 9800 old_crtc_state = NULL; 9801 9802 if (acrtc) { 9803 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 9804 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9805 } 9806 9807 if (old_crtc_state) 9808 pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 9809 old_crtc_state->enable, 9810 old_crtc_state->active, 9811 old_crtc_state->mode_changed, 9812 old_crtc_state->active_changed, 9813 old_crtc_state->connectors_changed); 9814 9815 if (new_crtc_state) 9816 pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 9817 new_crtc_state->enable, 9818 new_crtc_state->active, 9819 new_crtc_state->mode_changed, 9820 new_crtc_state->active_changed, 9821 new_crtc_state->connectors_changed); 9822 } 9823 9824 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9825 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9826 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9827 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 9828 9829 if (!adev->dm.hdcp_workqueue) 9830 continue; 9831 9832 new_crtc_state = NULL; 9833 old_crtc_state = NULL; 9834 9835 if (acrtc) { 9836 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 9837 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9838 } 9839 9840 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9841 9842 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && 9843 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 9844 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 9845 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 9846 dm_new_con_state->update_hdcp = true; 9847 continue; 9848 } 9849 9850 if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state, 9851 old_con_state, connector, adev->dm.hdcp_workqueue)) { 9852 /* when display is unplugged from mst hub, connctor will 9853 * be destroyed within dm_dp_mst_connector_destroy. connector 9854 * hdcp perperties, like type, undesired, desired, enabled, 9855 * will be lost. So, save hdcp properties into hdcp_work within 9856 * amdgpu_dm_atomic_commit_tail. if the same display is 9857 * plugged back with same display index, its hdcp properties 9858 * will be retrieved from hdcp_work within dm_dp_mst_get_modes 9859 */ 9860 9861 bool enable_encryption = false; 9862 9863 if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) 9864 enable_encryption = true; 9865 9866 if (aconnector->dc_link && aconnector->dc_sink && 9867 aconnector->dc_link->type == dc_connection_mst_branch) { 9868 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; 9869 struct hdcp_workqueue *hdcp_w = 9870 &hdcp_work[aconnector->dc_link->link_index]; 9871 9872 hdcp_w->hdcp_content_type[connector->index] = 9873 new_con_state->hdcp_content_type; 9874 hdcp_w->content_protection[connector->index] = 9875 new_con_state->content_protection; 9876 } 9877 9878 if (new_crtc_state && new_crtc_state->mode_changed && 9879 new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) 9880 enable_encryption = true; 9881 9882 DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); 9883 9884 if (aconnector->dc_link) 9885 hdcp_update_display( 9886 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, 9887 new_con_state->hdcp_content_type, enable_encryption); 9888 } 9889 } 9890 9891 /* Handle connector state changes */ 9892 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9893 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9894 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 9895 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9896 struct dc_surface_update *dummy_updates; 9897 struct dc_stream_update stream_update; 9898 struct dc_info_packet hdr_packet; 9899 struct dc_stream_status *status = NULL; 9900 bool abm_changed, hdr_changed, scaling_changed; 9901 9902 memset(&stream_update, 0, sizeof(stream_update)); 9903 9904 if (acrtc) { 9905 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 9906 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9907 } 9908 9909 /* Skip any modesets/resets */ 9910 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) 9911 continue; 9912 9913 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9914 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9915 9916 scaling_changed = is_scaling_state_different(dm_new_con_state, 9917 dm_old_con_state); 9918 9919 abm_changed = dm_new_crtc_state->abm_level != 9920 dm_old_crtc_state->abm_level; 9921 9922 hdr_changed = 9923 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state); 9924 9925 if (!scaling_changed && !abm_changed && !hdr_changed) 9926 continue; 9927 9928 stream_update.stream = dm_new_crtc_state->stream; 9929 if (scaling_changed) { 9930 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, 9931 dm_new_con_state, dm_new_crtc_state->stream); 9932 9933 stream_update.src = dm_new_crtc_state->stream->src; 9934 stream_update.dst = dm_new_crtc_state->stream->dst; 9935 } 9936 9937 if (abm_changed) { 9938 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; 9939 9940 stream_update.abm_level = &dm_new_crtc_state->abm_level; 9941 } 9942 9943 if (hdr_changed) { 9944 fill_hdr_info_packet(new_con_state, &hdr_packet); 9945 stream_update.hdr_static_metadata = &hdr_packet; 9946 } 9947 9948 status = dc_stream_get_status(dm_new_crtc_state->stream); 9949 9950 if (WARN_ON(!status)) 9951 continue; 9952 9953 WARN_ON(!status->plane_count); 9954 9955 /* 9956 * TODO: DC refuses to perform stream updates without a dc_surface_update. 9957 * Here we create an empty update on each plane. 9958 * To fix this, DC should permit updating only stream properties. 9959 */ 9960 dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); 9961 if (!dummy_updates) { 9962 DRM_ERROR("Failed to allocate memory for dummy_updates.\n"); 9963 continue; 9964 } 9965 for (j = 0; j < status->plane_count; j++) 9966 dummy_updates[j].surface = status->plane_states[0]; 9967 9968 sort(dummy_updates, status->plane_count, 9969 sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL); 9970 9971 mutex_lock(&dm->dc_lock); 9972 dc_exit_ips_for_hw_access(dm->dc); 9973 dc_update_planes_and_stream(dm->dc, 9974 dummy_updates, 9975 status->plane_count, 9976 dm_new_crtc_state->stream, 9977 &stream_update); 9978 mutex_unlock(&dm->dc_lock); 9979 kfree(dummy_updates); 9980 } 9981 9982 /** 9983 * Enable interrupts for CRTCs that are newly enabled or went through 9984 * a modeset. It was intentionally deferred until after the front end 9985 * state was modified to wait until the OTG was on and so the IRQ 9986 * handlers didn't access stale or invalid state. 9987 */ 9988 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 9989 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9990 #ifdef CONFIG_DEBUG_FS 9991 enum amdgpu_dm_pipe_crc_source cur_crc_src; 9992 #endif 9993 /* Count number of newly disabled CRTCs for dropping PM refs later. */ 9994 if (old_crtc_state->active && !new_crtc_state->active) 9995 crtc_disable_count++; 9996 9997 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9998 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9999 10000 /* For freesync config update on crtc state and params for irq */ 10001 update_stream_irq_parameters(dm, dm_new_crtc_state); 10002 10003 #ifdef CONFIG_DEBUG_FS 10004 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10005 cur_crc_src = acrtc->dm_irq_params.crc_src; 10006 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10007 #endif 10008 10009 if (new_crtc_state->active && 10010 (!old_crtc_state->active || 10011 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 10012 dc_stream_retain(dm_new_crtc_state->stream); 10013 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; 10014 manage_dm_interrupts(adev, acrtc, dm_new_crtc_state); 10015 } 10016 /* Handle vrr on->off / off->on transitions */ 10017 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); 10018 10019 #ifdef CONFIG_DEBUG_FS 10020 if (new_crtc_state->active && 10021 (!old_crtc_state->active || 10022 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 10023 /** 10024 * Frontend may have changed so reapply the CRC capture 10025 * settings for the stream. 10026 */ 10027 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { 10028 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 10029 if (amdgpu_dm_crc_window_is_activated(crtc)) { 10030 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10031 acrtc->dm_irq_params.window_param.update_win = true; 10032 10033 /** 10034 * It takes 2 frames for HW to stably generate CRC when 10035 * resuming from suspend, so we set skip_frame_cnt 2. 10036 */ 10037 acrtc->dm_irq_params.window_param.skip_frame_cnt = 2; 10038 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10039 } 10040 #endif 10041 if (amdgpu_dm_crtc_configure_crc_source( 10042 crtc, dm_new_crtc_state, cur_crc_src)) 10043 drm_dbg_atomic(dev, "Failed to configure crc source"); 10044 } 10045 } 10046 #endif 10047 } 10048 10049 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) 10050 if (new_crtc_state->async_flip) 10051 wait_for_vblank = false; 10052 10053 /* update planes when needed per crtc*/ 10054 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { 10055 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10056 10057 if (dm_new_crtc_state->stream) 10058 amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); 10059 } 10060 10061 /* Enable writeback */ 10062 for_each_new_connector_in_state(state, connector, new_con_state, i) { 10063 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 10064 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 10065 10066 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 10067 continue; 10068 10069 if (!new_con_state->writeback_job) 10070 continue; 10071 10072 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 10073 10074 if (!new_crtc_state) 10075 continue; 10076 10077 if (acrtc->wb_enabled) 10078 continue; 10079 10080 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10081 10082 dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state); 10083 acrtc->wb_enabled = true; 10084 } 10085 10086 /* Update audio instances for each connector. */ 10087 amdgpu_dm_commit_audio(dev, state); 10088 10089 /* restore the backlight level */ 10090 for (i = 0; i < dm->num_of_edps; i++) { 10091 if (dm->backlight_dev[i] && 10092 (dm->actual_brightness[i] != dm->brightness[i])) 10093 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 10094 } 10095 10096 /* 10097 * send vblank event on all events not handled in flip and 10098 * mark consumed event for drm_atomic_helper_commit_hw_done 10099 */ 10100 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10101 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 10102 10103 if (new_crtc_state->event) 10104 drm_send_event_locked(dev, &new_crtc_state->event->base); 10105 10106 new_crtc_state->event = NULL; 10107 } 10108 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10109 10110 /* Signal HW programming completion */ 10111 drm_atomic_helper_commit_hw_done(state); 10112 10113 if (wait_for_vblank) 10114 drm_atomic_helper_wait_for_flip_done(dev, state); 10115 10116 drm_atomic_helper_cleanup_planes(dev, state); 10117 10118 /* Don't free the memory if we are hitting this as part of suspend. 10119 * This way we don't free any memory during suspend; see 10120 * amdgpu_bo_free_kernel(). The memory will be freed in the first 10121 * non-suspend modeset or when the driver is torn down. 10122 */ 10123 if (!adev->in_suspend) { 10124 /* return the stolen vga memory back to VRAM */ 10125 if (!adev->mman.keep_stolen_vga_memory) 10126 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); 10127 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); 10128 } 10129 10130 /* 10131 * Finally, drop a runtime PM reference for each newly disabled CRTC, 10132 * so we can put the GPU into runtime suspend if we're not driving any 10133 * displays anymore 10134 */ 10135 for (i = 0; i < crtc_disable_count; i++) 10136 pm_runtime_put_autosuspend(dev->dev); 10137 pm_runtime_mark_last_busy(dev->dev); 10138 10139 trace_amdgpu_dm_atomic_commit_tail_finish(state); 10140 } 10141 10142 static int dm_force_atomic_commit(struct drm_connector *connector) 10143 { 10144 int ret = 0; 10145 struct drm_device *ddev = connector->dev; 10146 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); 10147 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 10148 struct drm_plane *plane = disconnected_acrtc->base.primary; 10149 struct drm_connector_state *conn_state; 10150 struct drm_crtc_state *crtc_state; 10151 struct drm_plane_state *plane_state; 10152 10153 if (!state) 10154 return -ENOMEM; 10155 10156 state->acquire_ctx = ddev->mode_config.acquire_ctx; 10157 10158 /* Construct an atomic state to restore previous display setting */ 10159 10160 /* 10161 * Attach connectors to drm_atomic_state 10162 */ 10163 conn_state = drm_atomic_get_connector_state(state, connector); 10164 10165 ret = PTR_ERR_OR_ZERO(conn_state); 10166 if (ret) 10167 goto out; 10168 10169 /* Attach crtc to drm_atomic_state*/ 10170 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); 10171 10172 ret = PTR_ERR_OR_ZERO(crtc_state); 10173 if (ret) 10174 goto out; 10175 10176 /* force a restore */ 10177 crtc_state->mode_changed = true; 10178 10179 /* Attach plane to drm_atomic_state */ 10180 plane_state = drm_atomic_get_plane_state(state, plane); 10181 10182 ret = PTR_ERR_OR_ZERO(plane_state); 10183 if (ret) 10184 goto out; 10185 10186 /* Call commit internally with the state we just constructed */ 10187 ret = drm_atomic_commit(state); 10188 10189 out: 10190 drm_atomic_state_put(state); 10191 if (ret) 10192 DRM_ERROR("Restoring old state failed with %i\n", ret); 10193 10194 return ret; 10195 } 10196 10197 /* 10198 * This function handles all cases when set mode does not come upon hotplug. 10199 * This includes when a display is unplugged then plugged back into the 10200 * same port and when running without usermode desktop manager supprot 10201 */ 10202 void dm_restore_drm_connector_state(struct drm_device *dev, 10203 struct drm_connector *connector) 10204 { 10205 struct amdgpu_dm_connector *aconnector; 10206 struct amdgpu_crtc *disconnected_acrtc; 10207 struct dm_crtc_state *acrtc_state; 10208 10209 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 10210 return; 10211 10212 aconnector = to_amdgpu_dm_connector(connector); 10213 10214 if (!aconnector->dc_sink || !connector->state || !connector->encoder) 10215 return; 10216 10217 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 10218 if (!disconnected_acrtc) 10219 return; 10220 10221 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state); 10222 if (!acrtc_state->stream) 10223 return; 10224 10225 /* 10226 * If the previous sink is not released and different from the current, 10227 * we deduce we are in a state where we can not rely on usermode call 10228 * to turn on the display, so we do it here 10229 */ 10230 if (acrtc_state->stream->sink != aconnector->dc_sink) 10231 dm_force_atomic_commit(&aconnector->base); 10232 } 10233 10234 /* 10235 * Grabs all modesetting locks to serialize against any blocking commits, 10236 * Waits for completion of all non blocking commits. 10237 */ 10238 static int do_aquire_global_lock(struct drm_device *dev, 10239 struct drm_atomic_state *state) 10240 { 10241 struct drm_crtc *crtc; 10242 struct drm_crtc_commit *commit; 10243 long ret; 10244 10245 /* 10246 * Adding all modeset locks to aquire_ctx will 10247 * ensure that when the framework release it the 10248 * extra locks we are locking here will get released to 10249 */ 10250 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); 10251 if (ret) 10252 return ret; 10253 10254 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 10255 spin_lock(&crtc->commit_lock); 10256 commit = list_first_entry_or_null(&crtc->commit_list, 10257 struct drm_crtc_commit, commit_entry); 10258 if (commit) 10259 drm_crtc_commit_get(commit); 10260 spin_unlock(&crtc->commit_lock); 10261 10262 if (!commit) 10263 continue; 10264 10265 /* 10266 * Make sure all pending HW programming completed and 10267 * page flips done 10268 */ 10269 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ); 10270 10271 if (ret > 0) 10272 ret = wait_for_completion_interruptible_timeout( 10273 &commit->flip_done, 10*HZ); 10274 10275 if (ret == 0) 10276 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n", 10277 crtc->base.id, crtc->name); 10278 10279 drm_crtc_commit_put(commit); 10280 } 10281 10282 return ret < 0 ? ret : 0; 10283 } 10284 10285 static void get_freesync_config_for_crtc( 10286 struct dm_crtc_state *new_crtc_state, 10287 struct dm_connector_state *new_con_state) 10288 { 10289 struct mod_freesync_config config = {0}; 10290 struct amdgpu_dm_connector *aconnector; 10291 struct drm_display_mode *mode = &new_crtc_state->base.mode; 10292 int vrefresh = drm_mode_vrefresh(mode); 10293 bool fs_vid_mode = false; 10294 10295 if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 10296 return; 10297 10298 aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); 10299 10300 new_crtc_state->vrr_supported = new_con_state->freesync_capable && 10301 vrefresh >= aconnector->min_vfreq && 10302 vrefresh <= aconnector->max_vfreq; 10303 10304 if (new_crtc_state->vrr_supported) { 10305 new_crtc_state->stream->ignore_msa_timing_param = true; 10306 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; 10307 10308 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000; 10309 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; 10310 config.vsif_supported = true; 10311 config.btr = true; 10312 10313 if (fs_vid_mode) { 10314 config.state = VRR_STATE_ACTIVE_FIXED; 10315 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz; 10316 goto out; 10317 } else if (new_crtc_state->base.vrr_enabled) { 10318 config.state = VRR_STATE_ACTIVE_VARIABLE; 10319 } else { 10320 config.state = VRR_STATE_INACTIVE; 10321 } 10322 } 10323 out: 10324 new_crtc_state->freesync_config = config; 10325 } 10326 10327 static void reset_freesync_config_for_crtc( 10328 struct dm_crtc_state *new_crtc_state) 10329 { 10330 new_crtc_state->vrr_supported = false; 10331 10332 memset(&new_crtc_state->vrr_infopacket, 0, 10333 sizeof(new_crtc_state->vrr_infopacket)); 10334 } 10335 10336 static bool 10337 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, 10338 struct drm_crtc_state *new_crtc_state) 10339 { 10340 const struct drm_display_mode *old_mode, *new_mode; 10341 10342 if (!old_crtc_state || !new_crtc_state) 10343 return false; 10344 10345 old_mode = &old_crtc_state->mode; 10346 new_mode = &new_crtc_state->mode; 10347 10348 if (old_mode->clock == new_mode->clock && 10349 old_mode->hdisplay == new_mode->hdisplay && 10350 old_mode->vdisplay == new_mode->vdisplay && 10351 old_mode->htotal == new_mode->htotal && 10352 old_mode->vtotal != new_mode->vtotal && 10353 old_mode->hsync_start == new_mode->hsync_start && 10354 old_mode->vsync_start != new_mode->vsync_start && 10355 old_mode->hsync_end == new_mode->hsync_end && 10356 old_mode->vsync_end != new_mode->vsync_end && 10357 old_mode->hskew == new_mode->hskew && 10358 old_mode->vscan == new_mode->vscan && 10359 (old_mode->vsync_end - old_mode->vsync_start) == 10360 (new_mode->vsync_end - new_mode->vsync_start)) 10361 return true; 10362 10363 return false; 10364 } 10365 10366 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) 10367 { 10368 u64 num, den, res; 10369 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; 10370 10371 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; 10372 10373 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000; 10374 den = (unsigned long long)new_crtc_state->mode.htotal * 10375 (unsigned long long)new_crtc_state->mode.vtotal; 10376 10377 res = div_u64(num, den); 10378 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res; 10379 } 10380 10381 static int dm_update_crtc_state(struct amdgpu_display_manager *dm, 10382 struct drm_atomic_state *state, 10383 struct drm_crtc *crtc, 10384 struct drm_crtc_state *old_crtc_state, 10385 struct drm_crtc_state *new_crtc_state, 10386 bool enable, 10387 bool *lock_and_validation_needed) 10388 { 10389 struct dm_atomic_state *dm_state = NULL; 10390 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 10391 struct dc_stream_state *new_stream; 10392 int ret = 0; 10393 10394 /* 10395 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set 10396 * update changed items 10397 */ 10398 struct amdgpu_crtc *acrtc = NULL; 10399 struct drm_connector *connector = NULL; 10400 struct amdgpu_dm_connector *aconnector = NULL; 10401 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; 10402 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; 10403 10404 new_stream = NULL; 10405 10406 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10407 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10408 acrtc = to_amdgpu_crtc(crtc); 10409 connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 10410 if (connector) 10411 aconnector = to_amdgpu_dm_connector(connector); 10412 10413 /* TODO This hack should go away */ 10414 if (connector && enable) { 10415 /* Make sure fake sink is created in plug-in scenario */ 10416 drm_new_conn_state = drm_atomic_get_new_connector_state(state, 10417 connector); 10418 drm_old_conn_state = drm_atomic_get_old_connector_state(state, 10419 connector); 10420 10421 if (IS_ERR(drm_new_conn_state)) { 10422 ret = PTR_ERR_OR_ZERO(drm_new_conn_state); 10423 goto fail; 10424 } 10425 10426 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); 10427 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); 10428 10429 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10430 goto skip_modeset; 10431 10432 new_stream = create_validate_stream_for_sink(aconnector, 10433 &new_crtc_state->mode, 10434 dm_new_conn_state, 10435 dm_old_crtc_state->stream); 10436 10437 /* 10438 * we can have no stream on ACTION_SET if a display 10439 * was disconnected during S3, in this case it is not an 10440 * error, the OS will be updated after detection, and 10441 * will do the right thing on next atomic commit 10442 */ 10443 10444 if (!new_stream) { 10445 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 10446 __func__, acrtc->base.base.id); 10447 ret = -ENOMEM; 10448 goto fail; 10449 } 10450 10451 /* 10452 * TODO: Check VSDB bits to decide whether this should 10453 * be enabled or not. 10454 */ 10455 new_stream->triggered_crtc_reset.enabled = 10456 dm->force_timing_sync; 10457 10458 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 10459 10460 ret = fill_hdr_info_packet(drm_new_conn_state, 10461 &new_stream->hdr_static_metadata); 10462 if (ret) 10463 goto fail; 10464 10465 /* 10466 * If we already removed the old stream from the context 10467 * (and set the new stream to NULL) then we can't reuse 10468 * the old stream even if the stream and scaling are unchanged. 10469 * We'll hit the BUG_ON and black screen. 10470 * 10471 * TODO: Refactor this function to allow this check to work 10472 * in all conditions. 10473 */ 10474 if (amdgpu_freesync_vid_mode && 10475 dm_new_crtc_state->stream && 10476 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) 10477 goto skip_modeset; 10478 10479 if (dm_new_crtc_state->stream && 10480 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 10481 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 10482 new_crtc_state->mode_changed = false; 10483 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", 10484 new_crtc_state->mode_changed); 10485 } 10486 } 10487 10488 /* mode_changed flag may get updated above, need to check again */ 10489 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10490 goto skip_modeset; 10491 10492 drm_dbg_state(state->dev, 10493 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 10494 acrtc->crtc_id, 10495 new_crtc_state->enable, 10496 new_crtc_state->active, 10497 new_crtc_state->planes_changed, 10498 new_crtc_state->mode_changed, 10499 new_crtc_state->active_changed, 10500 new_crtc_state->connectors_changed); 10501 10502 /* Remove stream for any changed/disabled CRTC */ 10503 if (!enable) { 10504 10505 if (!dm_old_crtc_state->stream) 10506 goto skip_modeset; 10507 10508 /* Unset freesync video if it was active before */ 10509 if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { 10510 dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; 10511 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; 10512 } 10513 10514 /* Now check if we should set freesync video mode */ 10515 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && 10516 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 10517 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && 10518 is_timing_unchanged_for_freesync(new_crtc_state, 10519 old_crtc_state)) { 10520 new_crtc_state->mode_changed = false; 10521 DRM_DEBUG_DRIVER( 10522 "Mode change not required for front porch change, setting mode_changed to %d", 10523 new_crtc_state->mode_changed); 10524 10525 set_freesync_fixed_config(dm_new_crtc_state); 10526 10527 goto skip_modeset; 10528 } else if (amdgpu_freesync_vid_mode && aconnector && 10529 is_freesync_video_mode(&new_crtc_state->mode, 10530 aconnector)) { 10531 struct drm_display_mode *high_mode; 10532 10533 high_mode = get_highest_refresh_rate_mode(aconnector, false); 10534 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) 10535 set_freesync_fixed_config(dm_new_crtc_state); 10536 } 10537 10538 ret = dm_atomic_get_state(state, &dm_state); 10539 if (ret) 10540 goto fail; 10541 10542 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", 10543 crtc->base.id); 10544 10545 /* i.e. reset mode */ 10546 if (dc_state_remove_stream( 10547 dm->dc, 10548 dm_state->context, 10549 dm_old_crtc_state->stream) != DC_OK) { 10550 ret = -EINVAL; 10551 goto fail; 10552 } 10553 10554 dc_stream_release(dm_old_crtc_state->stream); 10555 dm_new_crtc_state->stream = NULL; 10556 10557 reset_freesync_config_for_crtc(dm_new_crtc_state); 10558 10559 *lock_and_validation_needed = true; 10560 10561 } else {/* Add stream for any updated/enabled CRTC */ 10562 /* 10563 * Quick fix to prevent NULL pointer on new_stream when 10564 * added MST connectors not found in existing crtc_state in the chained mode 10565 * TODO: need to dig out the root cause of that 10566 */ 10567 if (!connector) 10568 goto skip_modeset; 10569 10570 if (modereset_required(new_crtc_state)) 10571 goto skip_modeset; 10572 10573 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream, 10574 dm_old_crtc_state->stream)) { 10575 10576 WARN_ON(dm_new_crtc_state->stream); 10577 10578 ret = dm_atomic_get_state(state, &dm_state); 10579 if (ret) 10580 goto fail; 10581 10582 dm_new_crtc_state->stream = new_stream; 10583 10584 dc_stream_retain(new_stream); 10585 10586 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n", 10587 crtc->base.id); 10588 10589 if (dc_state_add_stream( 10590 dm->dc, 10591 dm_state->context, 10592 dm_new_crtc_state->stream) != DC_OK) { 10593 ret = -EINVAL; 10594 goto fail; 10595 } 10596 10597 *lock_and_validation_needed = true; 10598 } 10599 } 10600 10601 skip_modeset: 10602 /* Release extra reference */ 10603 if (new_stream) 10604 dc_stream_release(new_stream); 10605 10606 /* 10607 * We want to do dc stream updates that do not require a 10608 * full modeset below. 10609 */ 10610 if (!(enable && connector && new_crtc_state->active)) 10611 return 0; 10612 /* 10613 * Given above conditions, the dc state cannot be NULL because: 10614 * 1. We're in the process of enabling CRTCs (just been added 10615 * to the dc context, or already is on the context) 10616 * 2. Has a valid connector attached, and 10617 * 3. Is currently active and enabled. 10618 * => The dc stream state currently exists. 10619 */ 10620 BUG_ON(dm_new_crtc_state->stream == NULL); 10621 10622 /* Scaling or underscan settings */ 10623 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) || 10624 drm_atomic_crtc_needs_modeset(new_crtc_state)) 10625 update_stream_scaling_settings( 10626 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream); 10627 10628 /* ABM settings */ 10629 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 10630 10631 /* 10632 * Color management settings. We also update color properties 10633 * when a modeset is needed, to ensure it gets reprogrammed. 10634 */ 10635 if (dm_new_crtc_state->base.color_mgmt_changed || 10636 dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf || 10637 drm_atomic_crtc_needs_modeset(new_crtc_state)) { 10638 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); 10639 if (ret) 10640 goto fail; 10641 } 10642 10643 /* Update Freesync settings. */ 10644 get_freesync_config_for_crtc(dm_new_crtc_state, 10645 dm_new_conn_state); 10646 10647 return ret; 10648 10649 fail: 10650 if (new_stream) 10651 dc_stream_release(new_stream); 10652 return ret; 10653 } 10654 10655 static bool should_reset_plane(struct drm_atomic_state *state, 10656 struct drm_plane *plane, 10657 struct drm_plane_state *old_plane_state, 10658 struct drm_plane_state *new_plane_state) 10659 { 10660 struct drm_plane *other; 10661 struct drm_plane_state *old_other_state, *new_other_state; 10662 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10663 struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state; 10664 struct amdgpu_device *adev = drm_to_adev(plane->dev); 10665 int i; 10666 10667 /* 10668 * TODO: Remove this hack for all asics once it proves that the 10669 * fast updates works fine on DCN3.2+. 10670 */ 10671 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) && 10672 state->allow_modeset) 10673 return true; 10674 10675 /* Exit early if we know that we're adding or removing the plane. */ 10676 if (old_plane_state->crtc != new_plane_state->crtc) 10677 return true; 10678 10679 /* old crtc == new_crtc == NULL, plane not in context. */ 10680 if (!new_plane_state->crtc) 10681 return false; 10682 10683 new_crtc_state = 10684 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); 10685 old_crtc_state = 10686 drm_atomic_get_old_crtc_state(state, old_plane_state->crtc); 10687 10688 if (!new_crtc_state) 10689 return true; 10690 10691 /* 10692 * A change in cursor mode means a new dc pipe needs to be acquired or 10693 * released from the state 10694 */ 10695 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); 10696 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 10697 if (plane->type == DRM_PLANE_TYPE_CURSOR && 10698 old_dm_crtc_state != NULL && 10699 old_dm_crtc_state->cursor_mode != new_dm_crtc_state->cursor_mode) { 10700 return true; 10701 } 10702 10703 /* CRTC Degamma changes currently require us to recreate planes. */ 10704 if (new_crtc_state->color_mgmt_changed) 10705 return true; 10706 10707 /* 10708 * On zpos change, planes need to be reordered by removing and re-adding 10709 * them one by one to the dc state, in order of descending zpos. 10710 * 10711 * TODO: We can likely skip bandwidth validation if the only thing that 10712 * changed about the plane was it'z z-ordering. 10713 */ 10714 if (old_plane_state->normalized_zpos != new_plane_state->normalized_zpos) 10715 return true; 10716 10717 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) 10718 return true; 10719 10720 /* 10721 * If there are any new primary or overlay planes being added or 10722 * removed then the z-order can potentially change. To ensure 10723 * correct z-order and pipe acquisition the current DC architecture 10724 * requires us to remove and recreate all existing planes. 10725 * 10726 * TODO: Come up with a more elegant solution for this. 10727 */ 10728 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { 10729 struct amdgpu_framebuffer *old_afb, *new_afb; 10730 struct dm_plane_state *dm_new_other_state, *dm_old_other_state; 10731 10732 dm_new_other_state = to_dm_plane_state(new_other_state); 10733 dm_old_other_state = to_dm_plane_state(old_other_state); 10734 10735 if (other->type == DRM_PLANE_TYPE_CURSOR) 10736 continue; 10737 10738 if (old_other_state->crtc != new_plane_state->crtc && 10739 new_other_state->crtc != new_plane_state->crtc) 10740 continue; 10741 10742 if (old_other_state->crtc != new_other_state->crtc) 10743 return true; 10744 10745 /* Src/dst size and scaling updates. */ 10746 if (old_other_state->src_w != new_other_state->src_w || 10747 old_other_state->src_h != new_other_state->src_h || 10748 old_other_state->crtc_w != new_other_state->crtc_w || 10749 old_other_state->crtc_h != new_other_state->crtc_h) 10750 return true; 10751 10752 /* Rotation / mirroring updates. */ 10753 if (old_other_state->rotation != new_other_state->rotation) 10754 return true; 10755 10756 /* Blending updates. */ 10757 if (old_other_state->pixel_blend_mode != 10758 new_other_state->pixel_blend_mode) 10759 return true; 10760 10761 /* Alpha updates. */ 10762 if (old_other_state->alpha != new_other_state->alpha) 10763 return true; 10764 10765 /* Colorspace changes. */ 10766 if (old_other_state->color_range != new_other_state->color_range || 10767 old_other_state->color_encoding != new_other_state->color_encoding) 10768 return true; 10769 10770 /* HDR/Transfer Function changes. */ 10771 if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf || 10772 dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut || 10773 dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult || 10774 dm_old_other_state->ctm != dm_new_other_state->ctm || 10775 dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut || 10776 dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf || 10777 dm_old_other_state->lut3d != dm_new_other_state->lut3d || 10778 dm_old_other_state->blend_lut != dm_new_other_state->blend_lut || 10779 dm_old_other_state->blend_tf != dm_new_other_state->blend_tf) 10780 return true; 10781 10782 /* Framebuffer checks fall at the end. */ 10783 if (!old_other_state->fb || !new_other_state->fb) 10784 continue; 10785 10786 /* Pixel format changes can require bandwidth updates. */ 10787 if (old_other_state->fb->format != new_other_state->fb->format) 10788 return true; 10789 10790 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb; 10791 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb; 10792 10793 /* Tiling and DCC changes also require bandwidth updates. */ 10794 if (old_afb->tiling_flags != new_afb->tiling_flags || 10795 old_afb->base.modifier != new_afb->base.modifier) 10796 return true; 10797 } 10798 10799 return false; 10800 } 10801 10802 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, 10803 struct drm_plane_state *new_plane_state, 10804 struct drm_framebuffer *fb) 10805 { 10806 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev); 10807 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); 10808 unsigned int pitch; 10809 bool linear; 10810 10811 if (fb->width > new_acrtc->max_cursor_width || 10812 fb->height > new_acrtc->max_cursor_height) { 10813 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n", 10814 new_plane_state->fb->width, 10815 new_plane_state->fb->height); 10816 return -EINVAL; 10817 } 10818 if (new_plane_state->src_w != fb->width << 16 || 10819 new_plane_state->src_h != fb->height << 16) { 10820 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 10821 return -EINVAL; 10822 } 10823 10824 /* Pitch in pixels */ 10825 pitch = fb->pitches[0] / fb->format->cpp[0]; 10826 10827 if (fb->width != pitch) { 10828 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d", 10829 fb->width, pitch); 10830 return -EINVAL; 10831 } 10832 10833 switch (pitch) { 10834 case 64: 10835 case 128: 10836 case 256: 10837 /* FB pitch is supported by cursor plane */ 10838 break; 10839 default: 10840 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch); 10841 return -EINVAL; 10842 } 10843 10844 /* Core DRM takes care of checking FB modifiers, so we only need to 10845 * check tiling flags when the FB doesn't have a modifier. 10846 */ 10847 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { 10848 if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) { 10849 linear = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE) == 0; 10850 } else if (adev->family >= AMDGPU_FAMILY_AI) { 10851 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; 10852 } else { 10853 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && 10854 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && 10855 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; 10856 } 10857 if (!linear) { 10858 DRM_DEBUG_ATOMIC("Cursor FB not linear"); 10859 return -EINVAL; 10860 } 10861 } 10862 10863 return 0; 10864 } 10865 10866 /* 10867 * Helper function for checking the cursor in native mode 10868 */ 10869 static int dm_check_native_cursor_state(struct drm_crtc *new_plane_crtc, 10870 struct drm_plane *plane, 10871 struct drm_plane_state *new_plane_state, 10872 bool enable) 10873 { 10874 10875 struct amdgpu_crtc *new_acrtc; 10876 int ret; 10877 10878 if (!enable || !new_plane_crtc || 10879 drm_atomic_plane_disabling(plane->state, new_plane_state)) 10880 return 0; 10881 10882 new_acrtc = to_amdgpu_crtc(new_plane_crtc); 10883 10884 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { 10885 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 10886 return -EINVAL; 10887 } 10888 10889 if (new_plane_state->fb) { 10890 ret = dm_check_cursor_fb(new_acrtc, new_plane_state, 10891 new_plane_state->fb); 10892 if (ret) 10893 return ret; 10894 } 10895 10896 return 0; 10897 } 10898 10899 static bool dm_should_update_native_cursor(struct drm_atomic_state *state, 10900 struct drm_crtc *old_plane_crtc, 10901 struct drm_crtc *new_plane_crtc, 10902 bool enable) 10903 { 10904 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10905 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 10906 10907 if (!enable) { 10908 if (old_plane_crtc == NULL) 10909 return true; 10910 10911 old_crtc_state = drm_atomic_get_old_crtc_state( 10912 state, old_plane_crtc); 10913 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10914 10915 return dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; 10916 } else { 10917 if (new_plane_crtc == NULL) 10918 return true; 10919 10920 new_crtc_state = drm_atomic_get_new_crtc_state( 10921 state, new_plane_crtc); 10922 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10923 10924 return dm_new_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; 10925 } 10926 } 10927 10928 static int dm_update_plane_state(struct dc *dc, 10929 struct drm_atomic_state *state, 10930 struct drm_plane *plane, 10931 struct drm_plane_state *old_plane_state, 10932 struct drm_plane_state *new_plane_state, 10933 bool enable, 10934 bool *lock_and_validation_needed, 10935 bool *is_top_most_overlay) 10936 { 10937 10938 struct dm_atomic_state *dm_state = NULL; 10939 struct drm_crtc *new_plane_crtc, *old_plane_crtc; 10940 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10941 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; 10942 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; 10943 bool needs_reset, update_native_cursor; 10944 int ret = 0; 10945 10946 10947 new_plane_crtc = new_plane_state->crtc; 10948 old_plane_crtc = old_plane_state->crtc; 10949 dm_new_plane_state = to_dm_plane_state(new_plane_state); 10950 dm_old_plane_state = to_dm_plane_state(old_plane_state); 10951 10952 update_native_cursor = dm_should_update_native_cursor(state, 10953 old_plane_crtc, 10954 new_plane_crtc, 10955 enable); 10956 10957 if (plane->type == DRM_PLANE_TYPE_CURSOR && update_native_cursor) { 10958 ret = dm_check_native_cursor_state(new_plane_crtc, plane, 10959 new_plane_state, enable); 10960 if (ret) 10961 return ret; 10962 10963 return 0; 10964 } 10965 10966 needs_reset = should_reset_plane(state, plane, old_plane_state, 10967 new_plane_state); 10968 10969 /* Remove any changed/removed planes */ 10970 if (!enable) { 10971 if (!needs_reset) 10972 return 0; 10973 10974 if (!old_plane_crtc) 10975 return 0; 10976 10977 old_crtc_state = drm_atomic_get_old_crtc_state( 10978 state, old_plane_crtc); 10979 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10980 10981 if (!dm_old_crtc_state->stream) 10982 return 0; 10983 10984 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", 10985 plane->base.id, old_plane_crtc->base.id); 10986 10987 ret = dm_atomic_get_state(state, &dm_state); 10988 if (ret) 10989 return ret; 10990 10991 if (!dc_state_remove_plane( 10992 dc, 10993 dm_old_crtc_state->stream, 10994 dm_old_plane_state->dc_state, 10995 dm_state->context)) { 10996 10997 return -EINVAL; 10998 } 10999 11000 if (dm_old_plane_state->dc_state) 11001 dc_plane_state_release(dm_old_plane_state->dc_state); 11002 11003 dm_new_plane_state->dc_state = NULL; 11004 11005 *lock_and_validation_needed = true; 11006 11007 } else { /* Add new planes */ 11008 struct dc_plane_state *dc_new_plane_state; 11009 11010 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 11011 return 0; 11012 11013 if (!new_plane_crtc) 11014 return 0; 11015 11016 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); 11017 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11018 11019 if (!dm_new_crtc_state->stream) 11020 return 0; 11021 11022 if (!needs_reset) 11023 return 0; 11024 11025 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); 11026 if (ret) 11027 goto out; 11028 11029 WARN_ON(dm_new_plane_state->dc_state); 11030 11031 dc_new_plane_state = dc_create_plane_state(dc); 11032 if (!dc_new_plane_state) { 11033 ret = -ENOMEM; 11034 goto out; 11035 } 11036 11037 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", 11038 plane->base.id, new_plane_crtc->base.id); 11039 11040 ret = fill_dc_plane_attributes( 11041 drm_to_adev(new_plane_crtc->dev), 11042 dc_new_plane_state, 11043 new_plane_state, 11044 new_crtc_state); 11045 if (ret) { 11046 dc_plane_state_release(dc_new_plane_state); 11047 goto out; 11048 } 11049 11050 ret = dm_atomic_get_state(state, &dm_state); 11051 if (ret) { 11052 dc_plane_state_release(dc_new_plane_state); 11053 goto out; 11054 } 11055 11056 /* 11057 * Any atomic check errors that occur after this will 11058 * not need a release. The plane state will be attached 11059 * to the stream, and therefore part of the atomic 11060 * state. It'll be released when the atomic state is 11061 * cleaned. 11062 */ 11063 if (!dc_state_add_plane( 11064 dc, 11065 dm_new_crtc_state->stream, 11066 dc_new_plane_state, 11067 dm_state->context)) { 11068 11069 dc_plane_state_release(dc_new_plane_state); 11070 ret = -EINVAL; 11071 goto out; 11072 } 11073 11074 dm_new_plane_state->dc_state = dc_new_plane_state; 11075 11076 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY); 11077 11078 /* Tell DC to do a full surface update every time there 11079 * is a plane change. Inefficient, but works for now. 11080 */ 11081 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; 11082 11083 *lock_and_validation_needed = true; 11084 } 11085 11086 out: 11087 /* If enabling cursor overlay failed, attempt fallback to native mode */ 11088 if (enable && ret == -EINVAL && plane->type == DRM_PLANE_TYPE_CURSOR) { 11089 ret = dm_check_native_cursor_state(new_plane_crtc, plane, 11090 new_plane_state, enable); 11091 if (ret) 11092 return ret; 11093 11094 dm_new_crtc_state->cursor_mode = DM_CURSOR_NATIVE_MODE; 11095 } 11096 11097 return ret; 11098 } 11099 11100 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, 11101 int *src_w, int *src_h) 11102 { 11103 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 11104 case DRM_MODE_ROTATE_90: 11105 case DRM_MODE_ROTATE_270: 11106 *src_w = plane_state->src_h >> 16; 11107 *src_h = plane_state->src_w >> 16; 11108 break; 11109 case DRM_MODE_ROTATE_0: 11110 case DRM_MODE_ROTATE_180: 11111 default: 11112 *src_w = plane_state->src_w >> 16; 11113 *src_h = plane_state->src_h >> 16; 11114 break; 11115 } 11116 } 11117 11118 static void 11119 dm_get_plane_scale(struct drm_plane_state *plane_state, 11120 int *out_plane_scale_w, int *out_plane_scale_h) 11121 { 11122 int plane_src_w, plane_src_h; 11123 11124 dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h); 11125 *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0; 11126 *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0; 11127 } 11128 11129 /* 11130 * The normalized_zpos value cannot be used by this iterator directly. It's only 11131 * calculated for enabled planes, potentially causing normalized_zpos collisions 11132 * between enabled/disabled planes in the atomic state. We need a unique value 11133 * so that the iterator will not generate the same object twice, or loop 11134 * indefinitely. 11135 */ 11136 static inline struct __drm_planes_state *__get_next_zpos( 11137 struct drm_atomic_state *state, 11138 struct __drm_planes_state *prev) 11139 { 11140 unsigned int highest_zpos = 0, prev_zpos = 256; 11141 uint32_t highest_id = 0, prev_id = UINT_MAX; 11142 struct drm_plane_state *new_plane_state; 11143 struct drm_plane *plane; 11144 int i, highest_i = -1; 11145 11146 if (prev != NULL) { 11147 prev_zpos = prev->new_state->zpos; 11148 prev_id = prev->ptr->base.id; 11149 } 11150 11151 for_each_new_plane_in_state(state, plane, new_plane_state, i) { 11152 /* Skip planes with higher zpos than the previously returned */ 11153 if (new_plane_state->zpos > prev_zpos || 11154 (new_plane_state->zpos == prev_zpos && 11155 plane->base.id >= prev_id)) 11156 continue; 11157 11158 /* Save the index of the plane with highest zpos */ 11159 if (new_plane_state->zpos > highest_zpos || 11160 (new_plane_state->zpos == highest_zpos && 11161 plane->base.id > highest_id)) { 11162 highest_zpos = new_plane_state->zpos; 11163 highest_id = plane->base.id; 11164 highest_i = i; 11165 } 11166 } 11167 11168 if (highest_i < 0) 11169 return NULL; 11170 11171 return &state->planes[highest_i]; 11172 } 11173 11174 /* 11175 * Use the uniqueness of the plane's (zpos, drm obj ID) combination to iterate 11176 * by descending zpos, as read from the new plane state. This is the same 11177 * ordering as defined by drm_atomic_normalize_zpos(). 11178 */ 11179 #define for_each_oldnew_plane_in_descending_zpos(__state, plane, old_plane_state, new_plane_state) \ 11180 for (struct __drm_planes_state *__i = __get_next_zpos((__state), NULL); \ 11181 __i != NULL; __i = __get_next_zpos((__state), __i)) \ 11182 for_each_if(((plane) = __i->ptr, \ 11183 (void)(plane) /* Only to avoid unused-but-set-variable warning */, \ 11184 (old_plane_state) = __i->old_state, \ 11185 (new_plane_state) = __i->new_state, 1)) 11186 11187 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) 11188 { 11189 struct drm_connector *connector; 11190 struct drm_connector_state *conn_state, *old_conn_state; 11191 struct amdgpu_dm_connector *aconnector = NULL; 11192 int i; 11193 11194 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { 11195 if (!conn_state->crtc) 11196 conn_state = old_conn_state; 11197 11198 if (conn_state->crtc != crtc) 11199 continue; 11200 11201 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 11202 continue; 11203 11204 aconnector = to_amdgpu_dm_connector(connector); 11205 if (!aconnector->mst_output_port || !aconnector->mst_root) 11206 aconnector = NULL; 11207 else 11208 break; 11209 } 11210 11211 if (!aconnector) 11212 return 0; 11213 11214 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr); 11215 } 11216 11217 /** 11218 * DOC: Cursor Modes - Native vs Overlay 11219 * 11220 * In native mode, the cursor uses a integrated cursor pipe within each DCN hw 11221 * plane. It does not require a dedicated hw plane to enable, but it is 11222 * subjected to the same z-order and scaling as the hw plane. It also has format 11223 * restrictions, a RGB cursor in native mode cannot be enabled within a non-RGB 11224 * hw plane. 11225 * 11226 * In overlay mode, the cursor uses a separate DCN hw plane, and thus has its 11227 * own scaling and z-pos. It also has no blending restrictions. It lends to a 11228 * cursor behavior more akin to a DRM client's expectations. However, it does 11229 * occupy an extra DCN plane, and therefore will only be used if a DCN plane is 11230 * available. 11231 */ 11232 11233 /** 11234 * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc 11235 * @adev: amdgpu device 11236 * @state: DRM atomic state 11237 * @dm_crtc_state: amdgpu state for the CRTC containing the cursor 11238 * @cursor_mode: Returns the required cursor mode on dm_crtc_state 11239 * 11240 * Get whether the cursor should be enabled in native mode, or overlay mode, on 11241 * the dm_crtc_state. 11242 * 11243 * The cursor should be enabled in overlay mode if there exists an underlying 11244 * plane - on which the cursor may be blended - that is either YUV formatted, or 11245 * scaled differently from the cursor. 11246 * 11247 * Since zpos info is required, drm_atomic_normalize_zpos must be called before 11248 * calling this function. 11249 * 11250 * Return: 0 on success, or an error code if getting the cursor plane state 11251 * failed. 11252 */ 11253 static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev, 11254 struct drm_atomic_state *state, 11255 struct dm_crtc_state *dm_crtc_state, 11256 enum amdgpu_dm_cursor_mode *cursor_mode) 11257 { 11258 struct drm_plane_state *old_plane_state, *plane_state, *cursor_state; 11259 struct drm_crtc_state *crtc_state = &dm_crtc_state->base; 11260 struct drm_plane *plane; 11261 bool consider_mode_change = false; 11262 bool entire_crtc_covered = false; 11263 bool cursor_changed = false; 11264 int underlying_scale_w, underlying_scale_h; 11265 int cursor_scale_w, cursor_scale_h; 11266 int i; 11267 11268 /* Overlay cursor not supported on HW before DCN 11269 * DCN401 does not have the cursor-on-scaled-plane or cursor-on-yuv-plane restrictions 11270 * as previous DCN generations, so enable native mode on DCN401 in addition to DCE 11271 */ 11272 if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0 || 11273 amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) { 11274 *cursor_mode = DM_CURSOR_NATIVE_MODE; 11275 return 0; 11276 } 11277 11278 /* Init cursor_mode to be the same as current */ 11279 *cursor_mode = dm_crtc_state->cursor_mode; 11280 11281 /* 11282 * Cursor mode can change if a plane's format changes, scale changes, is 11283 * enabled/disabled, or z-order changes. 11284 */ 11285 for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) { 11286 int new_scale_w, new_scale_h, old_scale_w, old_scale_h; 11287 11288 /* Only care about planes on this CRTC */ 11289 if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0) 11290 continue; 11291 11292 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11293 cursor_changed = true; 11294 11295 if (drm_atomic_plane_enabling(old_plane_state, plane_state) || 11296 drm_atomic_plane_disabling(old_plane_state, plane_state) || 11297 old_plane_state->fb->format != plane_state->fb->format) { 11298 consider_mode_change = true; 11299 break; 11300 } 11301 11302 dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h); 11303 dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h); 11304 if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) { 11305 consider_mode_change = true; 11306 break; 11307 } 11308 } 11309 11310 if (!consider_mode_change && !crtc_state->zpos_changed) 11311 return 0; 11312 11313 /* 11314 * If no cursor change on this CRTC, and not enabled on this CRTC, then 11315 * no need to set cursor mode. This avoids needlessly locking the cursor 11316 * state. 11317 */ 11318 if (!cursor_changed && 11319 !(drm_plane_mask(crtc_state->crtc->cursor) & crtc_state->plane_mask)) { 11320 return 0; 11321 } 11322 11323 cursor_state = drm_atomic_get_plane_state(state, 11324 crtc_state->crtc->cursor); 11325 if (IS_ERR(cursor_state)) 11326 return PTR_ERR(cursor_state); 11327 11328 /* Cursor is disabled */ 11329 if (!cursor_state->fb) 11330 return 0; 11331 11332 /* For all planes in descending z-order (all of which are below cursor 11333 * as per zpos definitions), check their scaling and format 11334 */ 11335 for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, plane_state) { 11336 11337 /* Only care about non-cursor planes on this CRTC */ 11338 if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0 || 11339 plane->type == DRM_PLANE_TYPE_CURSOR) 11340 continue; 11341 11342 /* Underlying plane is YUV format - use overlay cursor */ 11343 if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) { 11344 *cursor_mode = DM_CURSOR_OVERLAY_MODE; 11345 return 0; 11346 } 11347 11348 dm_get_plane_scale(plane_state, 11349 &underlying_scale_w, &underlying_scale_h); 11350 dm_get_plane_scale(cursor_state, 11351 &cursor_scale_w, &cursor_scale_h); 11352 11353 /* Underlying plane has different scale - use overlay cursor */ 11354 if (cursor_scale_w != underlying_scale_w && 11355 cursor_scale_h != underlying_scale_h) { 11356 *cursor_mode = DM_CURSOR_OVERLAY_MODE; 11357 return 0; 11358 } 11359 11360 /* If this plane covers the whole CRTC, no need to check planes underneath */ 11361 if (plane_state->crtc_x <= 0 && plane_state->crtc_y <= 0 && 11362 plane_state->crtc_x + plane_state->crtc_w >= crtc_state->mode.hdisplay && 11363 plane_state->crtc_y + plane_state->crtc_h >= crtc_state->mode.vdisplay) { 11364 entire_crtc_covered = true; 11365 break; 11366 } 11367 } 11368 11369 /* If planes do not cover the entire CRTC, use overlay mode to enable 11370 * cursor over holes 11371 */ 11372 if (entire_crtc_covered) 11373 *cursor_mode = DM_CURSOR_NATIVE_MODE; 11374 else 11375 *cursor_mode = DM_CURSOR_OVERLAY_MODE; 11376 11377 return 0; 11378 } 11379 11380 /** 11381 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. 11382 * 11383 * @dev: The DRM device 11384 * @state: The atomic state to commit 11385 * 11386 * Validate that the given atomic state is programmable by DC into hardware. 11387 * This involves constructing a &struct dc_state reflecting the new hardware 11388 * state we wish to commit, then querying DC to see if it is programmable. It's 11389 * important not to modify the existing DC state. Otherwise, atomic_check 11390 * may unexpectedly commit hardware changes. 11391 * 11392 * When validating the DC state, it's important that the right locks are 11393 * acquired. For full updates case which removes/adds/updates streams on one 11394 * CRTC while flipping on another CRTC, acquiring global lock will guarantee 11395 * that any such full update commit will wait for completion of any outstanding 11396 * flip using DRMs synchronization events. 11397 * 11398 * Note that DM adds the affected connectors for all CRTCs in state, when that 11399 * might not seem necessary. This is because DC stream creation requires the 11400 * DC sink, which is tied to the DRM connector state. Cleaning this up should 11401 * be possible but non-trivial - a possible TODO item. 11402 * 11403 * Return: -Error code if validation failed. 11404 */ 11405 static int amdgpu_dm_atomic_check(struct drm_device *dev, 11406 struct drm_atomic_state *state) 11407 { 11408 struct amdgpu_device *adev = drm_to_adev(dev); 11409 struct dm_atomic_state *dm_state = NULL; 11410 struct dc *dc = adev->dm.dc; 11411 struct drm_connector *connector; 11412 struct drm_connector_state *old_con_state, *new_con_state; 11413 struct drm_crtc *crtc; 11414 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 11415 struct drm_plane *plane; 11416 struct drm_plane_state *old_plane_state, *new_plane_state, *new_cursor_state; 11417 enum dc_status status; 11418 int ret, i; 11419 bool lock_and_validation_needed = false; 11420 bool is_top_most_overlay = true; 11421 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 11422 struct drm_dp_mst_topology_mgr *mgr; 11423 struct drm_dp_mst_topology_state *mst_state; 11424 struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0}; 11425 11426 trace_amdgpu_dm_atomic_check_begin(state); 11427 11428 ret = drm_atomic_helper_check_modeset(dev, state); 11429 if (ret) { 11430 drm_dbg_atomic(dev, "drm_atomic_helper_check_modeset() failed\n"); 11431 goto fail; 11432 } 11433 11434 /* Check connector changes */ 11435 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 11436 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 11437 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 11438 11439 /* Skip connectors that are disabled or part of modeset already. */ 11440 if (!new_con_state->crtc) 11441 continue; 11442 11443 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); 11444 if (IS_ERR(new_crtc_state)) { 11445 drm_dbg_atomic(dev, "drm_atomic_get_crtc_state() failed\n"); 11446 ret = PTR_ERR(new_crtc_state); 11447 goto fail; 11448 } 11449 11450 if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || 11451 dm_old_con_state->scaling != dm_new_con_state->scaling) 11452 new_crtc_state->connectors_changed = true; 11453 } 11454 11455 if (dc_resource_is_dsc_encoding_supported(dc)) { 11456 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11457 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { 11458 ret = add_affected_mst_dsc_crtcs(state, crtc); 11459 if (ret) { 11460 drm_dbg_atomic(dev, "add_affected_mst_dsc_crtcs() failed\n"); 11461 goto fail; 11462 } 11463 } 11464 } 11465 } 11466 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11467 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 11468 11469 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 11470 !new_crtc_state->color_mgmt_changed && 11471 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled && 11472 dm_old_crtc_state->dsc_force_changed == false) 11473 continue; 11474 11475 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); 11476 if (ret) { 11477 drm_dbg_atomic(dev, "amdgpu_dm_verify_lut_sizes() failed\n"); 11478 goto fail; 11479 } 11480 11481 if (!new_crtc_state->enable) 11482 continue; 11483 11484 ret = drm_atomic_add_affected_connectors(state, crtc); 11485 if (ret) { 11486 drm_dbg_atomic(dev, "drm_atomic_add_affected_connectors() failed\n"); 11487 goto fail; 11488 } 11489 11490 ret = drm_atomic_add_affected_planes(state, crtc); 11491 if (ret) { 11492 drm_dbg_atomic(dev, "drm_atomic_add_affected_planes() failed\n"); 11493 goto fail; 11494 } 11495 11496 if (dm_old_crtc_state->dsc_force_changed) 11497 new_crtc_state->mode_changed = true; 11498 } 11499 11500 /* 11501 * Add all primary and overlay planes on the CRTC to the state 11502 * whenever a plane is enabled to maintain correct z-ordering 11503 * and to enable fast surface updates. 11504 */ 11505 drm_for_each_crtc(crtc, dev) { 11506 bool modified = false; 11507 11508 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 11509 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11510 continue; 11511 11512 if (new_plane_state->crtc == crtc || 11513 old_plane_state->crtc == crtc) { 11514 modified = true; 11515 break; 11516 } 11517 } 11518 11519 if (!modified) 11520 continue; 11521 11522 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 11523 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11524 continue; 11525 11526 new_plane_state = 11527 drm_atomic_get_plane_state(state, plane); 11528 11529 if (IS_ERR(new_plane_state)) { 11530 ret = PTR_ERR(new_plane_state); 11531 drm_dbg_atomic(dev, "new_plane_state is BAD\n"); 11532 goto fail; 11533 } 11534 } 11535 } 11536 11537 /* 11538 * DC consults the zpos (layer_index in DC terminology) to determine the 11539 * hw plane on which to enable the hw cursor (see 11540 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in 11541 * atomic state, so call drm helper to normalize zpos. 11542 */ 11543 ret = drm_atomic_normalize_zpos(dev, state); 11544 if (ret) { 11545 drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n"); 11546 goto fail; 11547 } 11548 11549 /* 11550 * Determine whether cursors on each CRTC should be enabled in native or 11551 * overlay mode. 11552 */ 11553 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11554 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11555 11556 ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state, 11557 &dm_new_crtc_state->cursor_mode); 11558 if (ret) { 11559 drm_dbg(dev, "Failed to determine cursor mode\n"); 11560 goto fail; 11561 } 11562 11563 /* 11564 * If overlay cursor is needed, DC cannot go through the 11565 * native cursor update path. All enabled planes on the CRTC 11566 * need to be added for DC to not disable a plane by mistake 11567 */ 11568 if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) { 11569 ret = drm_atomic_add_affected_planes(state, crtc); 11570 if (ret) 11571 goto fail; 11572 } 11573 } 11574 11575 /* Remove exiting planes if they are modified */ 11576 for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) { 11577 if (old_plane_state->fb && new_plane_state->fb && 11578 get_mem_type(old_plane_state->fb) != 11579 get_mem_type(new_plane_state->fb)) 11580 lock_and_validation_needed = true; 11581 11582 ret = dm_update_plane_state(dc, state, plane, 11583 old_plane_state, 11584 new_plane_state, 11585 false, 11586 &lock_and_validation_needed, 11587 &is_top_most_overlay); 11588 if (ret) { 11589 drm_dbg_atomic(dev, "dm_update_plane_state() failed\n"); 11590 goto fail; 11591 } 11592 } 11593 11594 /* Disable all crtcs which require disable */ 11595 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11596 ret = dm_update_crtc_state(&adev->dm, state, crtc, 11597 old_crtc_state, 11598 new_crtc_state, 11599 false, 11600 &lock_and_validation_needed); 11601 if (ret) { 11602 drm_dbg_atomic(dev, "DISABLE: dm_update_crtc_state() failed\n"); 11603 goto fail; 11604 } 11605 } 11606 11607 /* Enable all crtcs which require enable */ 11608 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11609 ret = dm_update_crtc_state(&adev->dm, state, crtc, 11610 old_crtc_state, 11611 new_crtc_state, 11612 true, 11613 &lock_and_validation_needed); 11614 if (ret) { 11615 drm_dbg_atomic(dev, "ENABLE: dm_update_crtc_state() failed\n"); 11616 goto fail; 11617 } 11618 } 11619 11620 /* Add new/modified planes */ 11621 for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) { 11622 ret = dm_update_plane_state(dc, state, plane, 11623 old_plane_state, 11624 new_plane_state, 11625 true, 11626 &lock_and_validation_needed, 11627 &is_top_most_overlay); 11628 if (ret) { 11629 drm_dbg_atomic(dev, "dm_update_plane_state() failed\n"); 11630 goto fail; 11631 } 11632 } 11633 11634 #if defined(CONFIG_DRM_AMD_DC_FP) 11635 if (dc_resource_is_dsc_encoding_supported(dc)) { 11636 ret = pre_validate_dsc(state, &dm_state, vars); 11637 if (ret != 0) 11638 goto fail; 11639 } 11640 #endif 11641 11642 /* Run this here since we want to validate the streams we created */ 11643 ret = drm_atomic_helper_check_planes(dev, state); 11644 if (ret) { 11645 drm_dbg_atomic(dev, "drm_atomic_helper_check_planes() failed\n"); 11646 goto fail; 11647 } 11648 11649 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11650 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11651 if (dm_new_crtc_state->mpo_requested) 11652 drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc); 11653 } 11654 11655 /* Check cursor restrictions */ 11656 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11657 enum amdgpu_dm_cursor_mode required_cursor_mode; 11658 int is_rotated, is_scaled; 11659 11660 /* Overlay cusor not subject to native cursor restrictions */ 11661 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11662 if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) 11663 continue; 11664 11665 /* Check if rotation or scaling is enabled on DCN401 */ 11666 if ((drm_plane_mask(crtc->cursor) & new_crtc_state->plane_mask) && 11667 amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) { 11668 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor); 11669 11670 is_rotated = new_cursor_state && 11671 ((new_cursor_state->rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0); 11672 is_scaled = new_cursor_state && ((new_cursor_state->src_w >> 16 != new_cursor_state->crtc_w) || 11673 (new_cursor_state->src_h >> 16 != new_cursor_state->crtc_h)); 11674 11675 if (is_rotated || is_scaled) { 11676 drm_dbg_driver( 11677 crtc->dev, 11678 "[CRTC:%d:%s] cannot enable hardware cursor due to rotation/scaling\n", 11679 crtc->base.id, crtc->name); 11680 ret = -EINVAL; 11681 goto fail; 11682 } 11683 } 11684 11685 /* If HW can only do native cursor, check restrictions again */ 11686 ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state, 11687 &required_cursor_mode); 11688 if (ret) { 11689 drm_dbg_driver(crtc->dev, 11690 "[CRTC:%d:%s] Checking cursor mode failed\n", 11691 crtc->base.id, crtc->name); 11692 goto fail; 11693 } else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) { 11694 drm_dbg_driver(crtc->dev, 11695 "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n", 11696 crtc->base.id, crtc->name); 11697 ret = -EINVAL; 11698 goto fail; 11699 } 11700 } 11701 11702 if (state->legacy_cursor_update) { 11703 /* 11704 * This is a fast cursor update coming from the plane update 11705 * helper, check if it can be done asynchronously for better 11706 * performance. 11707 */ 11708 state->async_update = 11709 !drm_atomic_helper_async_check(dev, state); 11710 11711 /* 11712 * Skip the remaining global validation if this is an async 11713 * update. Cursor updates can be done without affecting 11714 * state or bandwidth calcs and this avoids the performance 11715 * penalty of locking the private state object and 11716 * allocating a new dc_state. 11717 */ 11718 if (state->async_update) 11719 return 0; 11720 } 11721 11722 /* Check scaling and underscan changes*/ 11723 /* TODO Removed scaling changes validation due to inability to commit 11724 * new stream into context w\o causing full reset. Need to 11725 * decide how to handle. 11726 */ 11727 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 11728 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 11729 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 11730 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 11731 11732 /* Skip any modesets/resets */ 11733 if (!acrtc || drm_atomic_crtc_needs_modeset( 11734 drm_atomic_get_new_crtc_state(state, &acrtc->base))) 11735 continue; 11736 11737 /* Skip any thing not scale or underscan changes */ 11738 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) 11739 continue; 11740 11741 lock_and_validation_needed = true; 11742 } 11743 11744 /* set the slot info for each mst_state based on the link encoding format */ 11745 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { 11746 struct amdgpu_dm_connector *aconnector; 11747 struct drm_connector *connector; 11748 struct drm_connector_list_iter iter; 11749 u8 link_coding_cap; 11750 11751 drm_connector_list_iter_begin(dev, &iter); 11752 drm_for_each_connector_iter(connector, &iter) { 11753 if (connector->index == mst_state->mgr->conn_base_id) { 11754 aconnector = to_amdgpu_dm_connector(connector); 11755 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); 11756 drm_dp_mst_update_slots(mst_state, link_coding_cap); 11757 11758 break; 11759 } 11760 } 11761 drm_connector_list_iter_end(&iter); 11762 } 11763 11764 /** 11765 * Streams and planes are reset when there are changes that affect 11766 * bandwidth. Anything that affects bandwidth needs to go through 11767 * DC global validation to ensure that the configuration can be applied 11768 * to hardware. 11769 * 11770 * We have to currently stall out here in atomic_check for outstanding 11771 * commits to finish in this case because our IRQ handlers reference 11772 * DRM state directly - we can end up disabling interrupts too early 11773 * if we don't. 11774 * 11775 * TODO: Remove this stall and drop DM state private objects. 11776 */ 11777 if (lock_and_validation_needed) { 11778 ret = dm_atomic_get_state(state, &dm_state); 11779 if (ret) { 11780 drm_dbg_atomic(dev, "dm_atomic_get_state() failed\n"); 11781 goto fail; 11782 } 11783 11784 ret = do_aquire_global_lock(dev, state); 11785 if (ret) { 11786 drm_dbg_atomic(dev, "do_aquire_global_lock() failed\n"); 11787 goto fail; 11788 } 11789 11790 #if defined(CONFIG_DRM_AMD_DC_FP) 11791 if (dc_resource_is_dsc_encoding_supported(dc)) { 11792 ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); 11793 if (ret) { 11794 drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n"); 11795 ret = -EINVAL; 11796 goto fail; 11797 } 11798 } 11799 #endif 11800 11801 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); 11802 if (ret) { 11803 drm_dbg_atomic(dev, "dm_update_mst_vcpi_slots_for_dsc() failed\n"); 11804 goto fail; 11805 } 11806 11807 /* 11808 * Perform validation of MST topology in the state: 11809 * We need to perform MST atomic check before calling 11810 * dc_validate_global_state(), or there is a chance 11811 * to get stuck in an infinite loop and hang eventually. 11812 */ 11813 ret = drm_dp_mst_atomic_check(state); 11814 if (ret) { 11815 drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n"); 11816 goto fail; 11817 } 11818 status = dc_validate_global_state(dc, dm_state->context, true); 11819 if (status != DC_OK) { 11820 drm_dbg_atomic(dev, "DC global validation failure: %s (%d)", 11821 dc_status_to_str(status), status); 11822 ret = -EINVAL; 11823 goto fail; 11824 } 11825 } else { 11826 /* 11827 * The commit is a fast update. Fast updates shouldn't change 11828 * the DC context, affect global validation, and can have their 11829 * commit work done in parallel with other commits not touching 11830 * the same resource. If we have a new DC context as part of 11831 * the DM atomic state from validation we need to free it and 11832 * retain the existing one instead. 11833 * 11834 * Furthermore, since the DM atomic state only contains the DC 11835 * context and can safely be annulled, we can free the state 11836 * and clear the associated private object now to free 11837 * some memory and avoid a possible use-after-free later. 11838 */ 11839 11840 for (i = 0; i < state->num_private_objs; i++) { 11841 struct drm_private_obj *obj = state->private_objs[i].ptr; 11842 11843 if (obj->funcs == adev->dm.atomic_obj.funcs) { 11844 int j = state->num_private_objs-1; 11845 11846 dm_atomic_destroy_state(obj, 11847 state->private_objs[i].state); 11848 11849 /* If i is not at the end of the array then the 11850 * last element needs to be moved to where i was 11851 * before the array can safely be truncated. 11852 */ 11853 if (i != j) 11854 state->private_objs[i] = 11855 state->private_objs[j]; 11856 11857 state->private_objs[j].ptr = NULL; 11858 state->private_objs[j].state = NULL; 11859 state->private_objs[j].old_state = NULL; 11860 state->private_objs[j].new_state = NULL; 11861 11862 state->num_private_objs = j; 11863 break; 11864 } 11865 } 11866 } 11867 11868 /* Store the overall update type for use later in atomic check. */ 11869 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11870 struct dm_crtc_state *dm_new_crtc_state = 11871 to_dm_crtc_state(new_crtc_state); 11872 11873 /* 11874 * Only allow async flips for fast updates that don't change 11875 * the FB pitch, the DCC state, rotation, etc. 11876 */ 11877 if (new_crtc_state->async_flip && lock_and_validation_needed) { 11878 drm_dbg_atomic(crtc->dev, 11879 "[CRTC:%d:%s] async flips are only supported for fast updates\n", 11880 crtc->base.id, crtc->name); 11881 ret = -EINVAL; 11882 goto fail; 11883 } 11884 11885 dm_new_crtc_state->update_type = lock_and_validation_needed ? 11886 UPDATE_TYPE_FULL : UPDATE_TYPE_FAST; 11887 } 11888 11889 /* Must be success */ 11890 WARN_ON(ret); 11891 11892 trace_amdgpu_dm_atomic_check_finish(state, ret); 11893 11894 return ret; 11895 11896 fail: 11897 if (ret == -EDEADLK) 11898 drm_dbg_atomic(dev, "Atomic check stopped to avoid deadlock.\n"); 11899 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) 11900 drm_dbg_atomic(dev, "Atomic check stopped due to signal.\n"); 11901 else 11902 drm_dbg_atomic(dev, "Atomic check failed with err: %d\n", ret); 11903 11904 trace_amdgpu_dm_atomic_check_finish(state, ret); 11905 11906 return ret; 11907 } 11908 11909 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, 11910 unsigned int offset, 11911 unsigned int total_length, 11912 u8 *data, 11913 unsigned int length, 11914 struct amdgpu_hdmi_vsdb_info *vsdb) 11915 { 11916 bool res; 11917 union dmub_rb_cmd cmd; 11918 struct dmub_cmd_send_edid_cea *input; 11919 struct dmub_cmd_edid_cea_output *output; 11920 11921 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES) 11922 return false; 11923 11924 memset(&cmd, 0, sizeof(cmd)); 11925 11926 input = &cmd.edid_cea.data.input; 11927 11928 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA; 11929 cmd.edid_cea.header.sub_type = 0; 11930 cmd.edid_cea.header.payload_bytes = 11931 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header); 11932 input->offset = offset; 11933 input->length = length; 11934 input->cea_total_length = total_length; 11935 memcpy(input->payload, data, length); 11936 11937 res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); 11938 if (!res) { 11939 DRM_ERROR("EDID CEA parser failed\n"); 11940 return false; 11941 } 11942 11943 output = &cmd.edid_cea.data.output; 11944 11945 if (output->type == DMUB_CMD__EDID_CEA_ACK) { 11946 if (!output->ack.success) { 11947 DRM_ERROR("EDID CEA ack failed at offset %d\n", 11948 output->ack.offset); 11949 } 11950 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) { 11951 if (!output->amd_vsdb.vsdb_found) 11952 return false; 11953 11954 vsdb->freesync_supported = output->amd_vsdb.freesync_supported; 11955 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version; 11956 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate; 11957 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate; 11958 } else { 11959 DRM_WARN("Unknown EDID CEA parser results\n"); 11960 return false; 11961 } 11962 11963 return true; 11964 } 11965 11966 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, 11967 u8 *edid_ext, int len, 11968 struct amdgpu_hdmi_vsdb_info *vsdb_info) 11969 { 11970 int i; 11971 11972 /* send extension block to DMCU for parsing */ 11973 for (i = 0; i < len; i += 8) { 11974 bool res; 11975 int offset; 11976 11977 /* send 8 bytes a time */ 11978 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8)) 11979 return false; 11980 11981 if (i+8 == len) { 11982 /* EDID block sent completed, expect result */ 11983 int version, min_rate, max_rate; 11984 11985 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate); 11986 if (res) { 11987 /* amd vsdb found */ 11988 vsdb_info->freesync_supported = 1; 11989 vsdb_info->amd_vsdb_version = version; 11990 vsdb_info->min_refresh_rate_hz = min_rate; 11991 vsdb_info->max_refresh_rate_hz = max_rate; 11992 return true; 11993 } 11994 /* not amd vsdb */ 11995 return false; 11996 } 11997 11998 /* check for ack*/ 11999 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset); 12000 if (!res) 12001 return false; 12002 } 12003 12004 return false; 12005 } 12006 12007 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, 12008 u8 *edid_ext, int len, 12009 struct amdgpu_hdmi_vsdb_info *vsdb_info) 12010 { 12011 int i; 12012 12013 /* send extension block to DMCU for parsing */ 12014 for (i = 0; i < len; i += 8) { 12015 /* send 8 bytes a time */ 12016 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info)) 12017 return false; 12018 } 12019 12020 return vsdb_info->freesync_supported; 12021 } 12022 12023 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, 12024 u8 *edid_ext, int len, 12025 struct amdgpu_hdmi_vsdb_info *vsdb_info) 12026 { 12027 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); 12028 bool ret; 12029 12030 mutex_lock(&adev->dm.dc_lock); 12031 if (adev->dm.dmub_srv) 12032 ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); 12033 else 12034 ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); 12035 mutex_unlock(&adev->dm.dc_lock); 12036 return ret; 12037 } 12038 12039 static void parse_edid_displayid_vrr(struct drm_connector *connector, 12040 const struct edid *edid) 12041 { 12042 u8 *edid_ext = NULL; 12043 int i; 12044 int j = 0; 12045 u16 min_vfreq; 12046 u16 max_vfreq; 12047 12048 if (edid == NULL || edid->extensions == 0) 12049 return; 12050 12051 /* Find DisplayID extension */ 12052 for (i = 0; i < edid->extensions; i++) { 12053 edid_ext = (void *)(edid + (i + 1)); 12054 if (edid_ext[0] == DISPLAYID_EXT) 12055 break; 12056 } 12057 12058 if (edid_ext == NULL) 12059 return; 12060 12061 while (j < EDID_LENGTH) { 12062 /* Get dynamic video timing range from DisplayID if available */ 12063 if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 && 12064 (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) { 12065 min_vfreq = edid_ext[j+9]; 12066 if (edid_ext[j+1] & 7) 12067 max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8); 12068 else 12069 max_vfreq = edid_ext[j+10]; 12070 12071 if (max_vfreq && min_vfreq) { 12072 connector->display_info.monitor_range.max_vfreq = max_vfreq; 12073 connector->display_info.monitor_range.min_vfreq = min_vfreq; 12074 12075 return; 12076 } 12077 } 12078 j++; 12079 } 12080 } 12081 12082 static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, 12083 const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) 12084 { 12085 u8 *edid_ext = NULL; 12086 int i; 12087 int j = 0; 12088 12089 if (edid == NULL || edid->extensions == 0) 12090 return -ENODEV; 12091 12092 /* Find DisplayID extension */ 12093 for (i = 0; i < edid->extensions; i++) { 12094 edid_ext = (void *)(edid + (i + 1)); 12095 if (edid_ext[0] == DISPLAYID_EXT) 12096 break; 12097 } 12098 12099 while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) { 12100 struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j]; 12101 unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]); 12102 12103 if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID && 12104 amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) { 12105 vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false; 12106 vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3; 12107 DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode); 12108 12109 return true; 12110 } 12111 j++; 12112 } 12113 12114 return false; 12115 } 12116 12117 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, 12118 const struct edid *edid, 12119 struct amdgpu_hdmi_vsdb_info *vsdb_info) 12120 { 12121 u8 *edid_ext = NULL; 12122 int i; 12123 bool valid_vsdb_found = false; 12124 12125 /*----- drm_find_cea_extension() -----*/ 12126 /* No EDID or EDID extensions */ 12127 if (edid == NULL || edid->extensions == 0) 12128 return -ENODEV; 12129 12130 /* Find CEA extension */ 12131 for (i = 0; i < edid->extensions; i++) { 12132 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1); 12133 if (edid_ext[0] == CEA_EXT) 12134 break; 12135 } 12136 12137 if (i == edid->extensions) 12138 return -ENODEV; 12139 12140 /*----- cea_db_offsets() -----*/ 12141 if (edid_ext[0] != CEA_EXT) 12142 return -ENODEV; 12143 12144 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info); 12145 12146 return valid_vsdb_found ? i : -ENODEV; 12147 } 12148 12149 /** 12150 * amdgpu_dm_update_freesync_caps - Update Freesync capabilities 12151 * 12152 * @connector: Connector to query. 12153 * @drm_edid: DRM EDID from monitor 12154 * 12155 * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep 12156 * track of some of the display information in the internal data struct used by 12157 * amdgpu_dm. This function checks which type of connector we need to set the 12158 * FreeSync parameters. 12159 */ 12160 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, 12161 const struct drm_edid *drm_edid) 12162 { 12163 int i = 0; 12164 struct amdgpu_dm_connector *amdgpu_dm_connector = 12165 to_amdgpu_dm_connector(connector); 12166 struct dm_connector_state *dm_con_state = NULL; 12167 struct dc_sink *sink; 12168 struct amdgpu_device *adev = drm_to_adev(connector->dev); 12169 struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; 12170 const struct edid *edid; 12171 bool freesync_capable = false; 12172 enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; 12173 12174 if (!connector->state) { 12175 DRM_ERROR("%s - Connector has no state", __func__); 12176 goto update; 12177 } 12178 12179 sink = amdgpu_dm_connector->dc_sink ? 12180 amdgpu_dm_connector->dc_sink : 12181 amdgpu_dm_connector->dc_em_sink; 12182 12183 drm_edid_connector_update(connector, drm_edid); 12184 12185 if (!drm_edid || !sink) { 12186 dm_con_state = to_dm_connector_state(connector->state); 12187 12188 amdgpu_dm_connector->min_vfreq = 0; 12189 amdgpu_dm_connector->max_vfreq = 0; 12190 freesync_capable = false; 12191 12192 goto update; 12193 } 12194 12195 dm_con_state = to_dm_connector_state(connector->state); 12196 12197 if (!adev->dm.freesync_module) 12198 goto update; 12199 12200 edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() 12201 12202 /* Some eDP panels only have the refresh rate range info in DisplayID */ 12203 if ((connector->display_info.monitor_range.min_vfreq == 0 || 12204 connector->display_info.monitor_range.max_vfreq == 0)) 12205 parse_edid_displayid_vrr(connector, edid); 12206 12207 if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || 12208 sink->sink_signal == SIGNAL_TYPE_EDP)) { 12209 amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; 12210 amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; 12211 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 12212 freesync_capable = true; 12213 parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 12214 12215 if (vsdb_info.replay_mode) { 12216 amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode; 12217 amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version; 12218 amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; 12219 } 12220 12221 } else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { 12222 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 12223 if (i >= 0 && vsdb_info.freesync_supported) { 12224 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; 12225 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; 12226 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 12227 freesync_capable = true; 12228 12229 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; 12230 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; 12231 } 12232 } 12233 12234 if (amdgpu_dm_connector->dc_link) 12235 as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); 12236 12237 if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { 12238 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 12239 if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) { 12240 12241 amdgpu_dm_connector->pack_sdp_v1_3 = true; 12242 amdgpu_dm_connector->as_type = as_type; 12243 amdgpu_dm_connector->vsdb_info = vsdb_info; 12244 12245 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; 12246 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; 12247 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 12248 freesync_capable = true; 12249 12250 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; 12251 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; 12252 } 12253 } 12254 12255 update: 12256 if (dm_con_state) 12257 dm_con_state->freesync_capable = freesync_capable; 12258 12259 if (connector->state && amdgpu_dm_connector->dc_link && !freesync_capable && 12260 amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported) { 12261 amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported = false; 12262 amdgpu_dm_connector->dc_link->replay_settings.replay_feature_enabled = false; 12263 } 12264 12265 if (connector->vrr_capable_property) 12266 drm_connector_set_vrr_capable_property(connector, 12267 freesync_capable); 12268 } 12269 12270 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev) 12271 { 12272 struct amdgpu_device *adev = drm_to_adev(dev); 12273 struct dc *dc = adev->dm.dc; 12274 int i; 12275 12276 mutex_lock(&adev->dm.dc_lock); 12277 if (dc->current_state) { 12278 for (i = 0; i < dc->current_state->stream_count; ++i) 12279 dc->current_state->streams[i] 12280 ->triggered_crtc_reset.enabled = 12281 adev->dm.force_timing_sync; 12282 12283 dm_enable_per_frame_crtc_master_sync(dc->current_state); 12284 dc_trigger_sync(dc, dc->current_state); 12285 } 12286 mutex_unlock(&adev->dm.dc_lock); 12287 } 12288 12289 static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc) 12290 { 12291 if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter) 12292 dc_exit_ips_for_hw_access(dc); 12293 } 12294 12295 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, 12296 u32 value, const char *func_name) 12297 { 12298 #ifdef DM_CHECK_ADDR_0 12299 if (address == 0) { 12300 drm_err(adev_to_drm(ctx->driver_context), 12301 "invalid register write. address = 0"); 12302 return; 12303 } 12304 #endif 12305 12306 amdgpu_dm_exit_ips_for_hw_access(ctx->dc); 12307 cgs_write_register(ctx->cgs_device, address, value); 12308 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value); 12309 } 12310 12311 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, 12312 const char *func_name) 12313 { 12314 u32 value; 12315 #ifdef DM_CHECK_ADDR_0 12316 if (address == 0) { 12317 drm_err(adev_to_drm(ctx->driver_context), 12318 "invalid register read; address = 0\n"); 12319 return 0; 12320 } 12321 #endif 12322 12323 if (ctx->dmub_srv && 12324 ctx->dmub_srv->reg_helper_offload.gather_in_progress && 12325 !ctx->dmub_srv->reg_helper_offload.should_burst_write) { 12326 ASSERT(false); 12327 return 0; 12328 } 12329 12330 amdgpu_dm_exit_ips_for_hw_access(ctx->dc); 12331 12332 value = cgs_read_register(ctx->cgs_device, address); 12333 12334 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); 12335 12336 return value; 12337 } 12338 12339 int amdgpu_dm_process_dmub_aux_transfer_sync( 12340 struct dc_context *ctx, 12341 unsigned int link_index, 12342 struct aux_payload *payload, 12343 enum aux_return_code_type *operation_result) 12344 { 12345 struct amdgpu_device *adev = ctx->driver_context; 12346 struct dmub_notification *p_notify = adev->dm.dmub_notify; 12347 int ret = -1; 12348 12349 mutex_lock(&adev->dm.dpia_aux_lock); 12350 if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) { 12351 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; 12352 goto out; 12353 } 12354 12355 if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { 12356 DRM_ERROR("wait_for_completion_timeout timeout!"); 12357 *operation_result = AUX_RET_ERROR_TIMEOUT; 12358 goto out; 12359 } 12360 12361 if (p_notify->result != AUX_RET_SUCCESS) { 12362 /* 12363 * Transient states before tunneling is enabled could 12364 * lead to this error. We can ignore this for now. 12365 */ 12366 if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) { 12367 DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n", 12368 payload->address, payload->length, 12369 p_notify->result); 12370 } 12371 *operation_result = AUX_RET_ERROR_INVALID_REPLY; 12372 goto out; 12373 } 12374 12375 12376 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; 12377 if (!payload->write && p_notify->aux_reply.length && 12378 (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) { 12379 12380 if (payload->length != p_notify->aux_reply.length) { 12381 DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n", 12382 p_notify->aux_reply.length, 12383 payload->address, payload->length); 12384 *operation_result = AUX_RET_ERROR_INVALID_REPLY; 12385 goto out; 12386 } 12387 12388 memcpy(payload->data, p_notify->aux_reply.data, 12389 p_notify->aux_reply.length); 12390 } 12391 12392 /* success */ 12393 ret = p_notify->aux_reply.length; 12394 *operation_result = p_notify->result; 12395 out: 12396 reinit_completion(&adev->dm.dmub_aux_transfer_done); 12397 mutex_unlock(&adev->dm.dpia_aux_lock); 12398 return ret; 12399 } 12400 12401 int amdgpu_dm_process_dmub_set_config_sync( 12402 struct dc_context *ctx, 12403 unsigned int link_index, 12404 struct set_config_cmd_payload *payload, 12405 enum set_config_status *operation_result) 12406 { 12407 struct amdgpu_device *adev = ctx->driver_context; 12408 bool is_cmd_complete; 12409 int ret; 12410 12411 mutex_lock(&adev->dm.dpia_aux_lock); 12412 is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc, 12413 link_index, payload, adev->dm.dmub_notify); 12414 12415 if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { 12416 ret = 0; 12417 *operation_result = adev->dm.dmub_notify->sc_status; 12418 } else { 12419 DRM_ERROR("wait_for_completion_timeout timeout!"); 12420 ret = -1; 12421 *operation_result = SET_CONFIG_UNKNOWN_ERROR; 12422 } 12423 12424 if (!is_cmd_complete) 12425 reinit_completion(&adev->dm.dmub_aux_transfer_done); 12426 mutex_unlock(&adev->dm.dpia_aux_lock); 12427 return ret; 12428 } 12429 12430 bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) 12431 { 12432 return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type); 12433 } 12434 12435 bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) 12436 { 12437 return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type); 12438 } 12439