// SPDX-License-Identifier: MIT
/*
 * Copyright 2015-2026 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_replay.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/sort.h>

#include <drm/drm_privacy_screen_consumer.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_utils.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>

#include <media/cec-notifier.h>
#include <acpi/video.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "modules/inc/mod_freesync.h"
"modules/inc/mod_freesync.h" 105 #include "modules/power/power_helpers.h" 106 107 static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch"); 108 109 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" 110 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); 111 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" 112 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB); 113 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin" 114 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB); 115 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin" 116 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB); 117 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin" 118 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB); 119 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin" 120 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB); 121 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin" 122 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB); 123 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin" 124 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB); 125 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin" 126 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB); 127 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin" 128 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB); 129 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin" 130 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB); 131 132 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin" 133 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB); 134 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin" 135 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB); 136 137 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" 138 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); 139 140 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" 141 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); 142 143 #define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin" 144 MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB); 145 146 #define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin" 147 MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB); 148 149 #define FIRMWARE_DCN_36_DMUB "amdgpu/dcn_3_6_dmcub.bin" 150 MODULE_FIRMWARE(FIRMWARE_DCN_36_DMUB); 151 152 #define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin" 153 MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB); 154 155 #define FIRMWARE_DCN_42_DMUB "amdgpu/dcn_4_2_dmcub.bin" 156 MODULE_FIRMWARE(FIRMWARE_DCN_42_DMUB); 157 158 /** 159 * DOC: overview 160 * 161 * The AMDgpu display manager, **amdgpu_dm** (or even simpler, 162 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM 163 * requests into DC requests, and DC responses into DRM responses. 164 * 165 * The root control structure is &struct amdgpu_display_manager. 

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service, bool oem);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}
/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    u32 link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state);
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
					  int bl_idx,
					  u32 user_brightness);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc = NULL;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
			crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
	struct amdgpu_crtc *acrtc = NULL;
	struct dc *dc = adev->dm.dc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
			crtc);
		return 0;
	}

	if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
		dc_allow_idle_optimizations(dc, false);

	/*
	 * TODO: rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
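	/*
	 * A note on that reg-format, inferred from the packing below rather
	 * than from a register spec: *position carries v_position in its low
	 * 16 bits and h_position in the high 16 bits, while *vbl carries
	 * v_blank_start in the low half and v_blank_end in the high half.
	 */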
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}

static bool dm_is_idle(struct amdgpu_ip_block *ip_block)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
	return false;
}

static int dm_soft_reset(struct amdgpu_ip_block *ip_block)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->stream->adjust.timing_adjust_pending)
		return true;
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
		return true;
	else
		return false;
}

/*
 * DC will program planes with their z-order determined by their ordering
 * in the dc_surface_updates array. This comparator is used to sort them
 * by descending zpos.
 */
static int dm_plane_layer_index_cmp(const void *a, const void *b)
{
	const struct dc_surface_update *sa = (struct dc_surface_update *)a;
	const struct dc_surface_update *sb = (struct dc_surface_update *)b;

	/* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
	return sb->surface->layer_index - sa->surface->layer_index;
}
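/*
 * For example (hypothetical values): updates whose surfaces carry
 * layer_index 0, 2, 1 come out of sort() ordered 2, 1, 0, so the
 * top-most plane lands first in the dc_surface_updates array.
 */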
/**
 * update_planes_and_stream_adapter() - Send planes to be updated in DC
 *
 * DC has a generic way to update planes and stream via the
 * dc_update_planes_and_stream function; however, DM might need some
 * adjustments and preparation before calling it. This function is a wrapper
 * for dc_update_planes_and_stream that does any required configuration
 * before passing control to DC.
 *
 * @dc: Display Core control structure
 * @update_type: specify whether it is FULL/MEDIUM/FAST update
 * @planes_count: planes count to update
 * @stream: stream state
 * @stream_update: stream update
 * @array_of_surface_update: dc surface update pointer
 *
 */
static inline bool update_planes_and_stream_adapter(struct dc *dc,
						    int update_type,
						    int planes_count,
						    struct dc_stream_state *stream,
						    struct dc_stream_update *stream_update,
						    struct dc_surface_update *array_of_surface_update)
{
	sort(array_of_surface_update, planes_count,
	     sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);

	/*
	 * Previous frame finished and HW is ready for optimization.
	 */
	dc_post_update_surfaces_to_stream(dc);

	return dc_update_planes_and_stream(dc,
					   array_of_surface_update,
					   planes_count,
					   stream,
					   stream_update);
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct drm_device *dev = adev_to_drm(adev);
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	u32 vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		drm_dbg_state(dev, "CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		drm_dbg_state(dev,
			      "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			      amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
			      amdgpu_crtc->crtc_id, amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_dbg_state(dev,
		      "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		      amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}
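/*
 * The vmin/vmax update is offloaded to a worker because its callers run
 * in (v)blank interrupt context while holding the DRM event_lock, where
 * sleeping on dm.dc_lock (a mutex) is not allowed; that is also why
 * schedule_dc_vmin_vmax() below allocates with GFP_NOWAIT.
 */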
static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
{
	struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
	struct amdgpu_device *adev = work->adev;
	struct dc_stream_state *stream = work->stream;
	struct dc_crtc_timing_adjust *adjust = work->adjust;

	mutex_lock(&adev->dm.dc_lock);
	dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust);
	mutex_unlock(&adev->dm.dc_lock);

	dc_stream_release(stream);
	kfree(work->adjust);
	kfree(work);
}

static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
				  struct dc_stream_state *stream,
				  struct dc_crtc_timing_adjust *adjust)
{
	struct vupdate_offload_work *offload_work = kzalloc_obj(*offload_work,
								GFP_NOWAIT);
	if (!offload_work) {
		drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
		return;
	}

	struct dc_crtc_timing_adjust *adjust_copy = kzalloc_obj(*adjust_copy,
								GFP_NOWAIT);
	if (!adjust_copy) {
		drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
		kfree(offload_work);
		return;
	}

	dc_stream_retain(stream);
	memcpy(adjust_copy, adjust, sizeof(*adjust_copy));

	INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
	offload_work->adev = adev;
	offload_work->stream = stream;
	offload_work->adjust = adjust_copy;

	queue_work(system_wq, &offload_work->work);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = drm_crtc_vblank_crtc(&acrtc->base);
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
							frame_duration_ns,
							ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		drm_dbg_vbl(drm_dev,
			    "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
			    vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping only gives valid results
		 * once scanout is past the front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active && acrtc->dm_irq_params.stream) {
			bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
			bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
			bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state
						== VRR_STATE_ACTIVE_VARIABLE;

			amdgpu_dm_crtc_handle_vblank(acrtc);

			/* BTR processing for pre-DCE12 ASICs */
			if (adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
					schedule_dc_vmin_vmax(adev,
							      acrtc->dm_irq_params.stream,
							      &acrtc->dm_irq_params.vrr_params.adjust);
				}
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct drm_writeback_job *job;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

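	/*
	 * Writeback handling: when a job is pending, the mdelay() below
	 * busy-waits roughly one frame (computed from the stream's pixel
	 * clock and total timing) so the frame has scanned out before
	 * completion is signaled and the writeback path is disabled.
	 */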
	if (acrtc->wb_conn) {
		spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);

		if (acrtc->wb_pending) {
			job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
						       struct drm_writeback_job,
						       list_entry);
			acrtc->wb_pending = false;
			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);

			if (job) {
				unsigned int v_total, refresh_hz;
				struct dc_stream_state *stream = acrtc->dm_irq_params.stream;

				v_total = stream->adjust.v_total_max ?
					  stream->adjust.v_total_max : stream->timing.v_total;
				refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
						     100LL, (v_total * stream->timing.h_total));
				mdelay(1000 / refresh_hz);

				drm_writeback_signal_completion(acrtc->wb_conn, 0);
				dc_stream_fc_disable_writeback(adev->dm.dc,
							       acrtc->dm_irq_params.stream, 0);
			}
		} else
			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
	}

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);

	drm_dbg_vbl(adev_to_drm(adev),
		    "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		    vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		amdgpu_dm_crtc_handle_vblank(acrtc);

	/*
	 * The following must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported) {
		bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
		bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
		bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;

		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		/* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */
		if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
			schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
					      &acrtc->dm_irq_params.vrr_params.adjust);
		}
	}

	/*
	 * If there aren't any active_planes then DCN HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, to be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

static void dmub_aux_fused_io_callback(struct amdgpu_device *adev,
				       struct dmub_notification *notify)
{
	if (!adev || !notify) {
		ASSERT(false);
		return;
	}

	const struct dmub_cmd_fused_request *req = &notify->fused_request;
	const uint8_t ddc_line = req->u.aux.ddc_line;

	if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) {
		ASSERT(false);
		return;
	}

	struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line];

	static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch");
	memcpy(sync->reply_data, req, sizeof(*req));
	complete(&sync->replied);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index
 * through the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	u8 link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		drm_err(adev_to_drm(adev), "DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
	if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
		drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n");
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			if (notify->type == DMUB_NOTIFICATION_HPD)
				drm_info(adev_to_drm(adev), "DMUB HPD IRQ callback: link_index=%u\n", link_index);
			else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
				drm_info(adev_to_drm(adev), "DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
			else
				drm_warn(adev_to_drm(adev), "DMUB Unknown HPD callback type %d, link_index=%u\n",
					 notify->type, link_index);

			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD) {
			if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
				drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index);
			handle_hpd_irq_helper(hpd_aconnector);
		} else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
			handle_hpd_rx_irq(hpd_aconnector);
		}
	}
}
/**
 * dmub_hpd_sense_callback - DMUB HPD sense processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * HPD sense changes can occur during low power states and need to be
 * notified from firmware to driver.
 */
static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
				    struct dmub_notification *notify)
{
	drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n");
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false otherwise
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}
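/*
 * Typical usage (illustrative only; the actual registrations happen
 * during driver init, which is not part of this excerpt):
 *
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *				      dmub_aux_setconfig_callback, false);
 */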
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		drm_err(adev_to_drm(dmub_hpd_wrk->adev), "dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
										      dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}

static const char *dmub_notification_type_str(enum dmub_notification_type e)
{
	switch (e) {
	case DMUB_NOTIFICATION_NO_DATA:
		return "NO_DATA";
	case DMUB_NOTIFICATION_AUX_REPLY:
		return "AUX_REPLY";
	case DMUB_NOTIFICATION_HPD:
		return "HPD";
	case DMUB_NOTIFICATION_HPD_IRQ:
		return "HPD_IRQ";
	case DMUB_NOTIFICATION_SET_CONFIG_REPLY:
		return "SET_CONFIG_REPLY";
	case DMUB_NOTIFICATION_DPIA_NOTIFICATION:
		return "DPIA_NOTIFICATION";
	case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY:
		return "HPD_SENSE_NOTIFY";
	case DMUB_NOTIFICATION_FUSED_IO:
		return "FUSED_IO";
	default:
		return "<unknown>";
	}
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining the DMCUB trace buffer and
 * dispatching pending DMUB notifications to their registered handlers.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify = {0};
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	u32 count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
				       entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		drm_dbg_driver(adev_to_drm(adev), "Warning: count > DMUB_TRACE_MAX_READ");

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				drm_err(adev_to_drm(adev), "DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n",
					 dmub_notification_type_str(notify.type));
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc_obj(*dmub_hpd_wrk,
							   GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
								    GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				dmub_hpd_wrk->adev = adev;
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}
}

static int dm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(struct amdgpu_ip_block *ip_block,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(struct amdgpu_ip_block *ip_block);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < (unsigned long) mode->htotal * mode->vtotal)
			max_size = (unsigned long) mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4);
		}

	}

}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		mutex_lock(&connector->eld_mutex);
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));
		mutex_unlock(&connector->eld_mutex);

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	drm_dbg_kms(adev_to_drm(adev), "Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
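/*
 * The component ops above are what the HD-audio driver binds against to
 * pull ELDs (via amdgpu_dm_audio_component_get_eld()) and to receive
 * hotplug notifications (via amdgpu_dm_audio_eld_notify() below).
 */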
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		drm_dbg_kms(adev_to_drm(adev), "Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dc *dc = adev->dm.dc;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dc_context *ctx = adev->dm.dc->ctx;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	u32 i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	/* initialize register offsets for ASICs with runtime initialization available */
	if (dmub_srv->hw_funcs.init_reg_offsets)
		dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES_256;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = adev->dm.fw_inst_size;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	memset(fb_info->fb[DMUB_WINDOW_SHARED_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_SHARED_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.soc_fb_info.fb_base = adev->gmc.fb_start;
	hw_params.soc_fb_info.fb_offset = adev->vm_manager.vram_base_offset;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	/* Enable usb4 dpia in the FW APU */
	if (dc->caps.is_apu &&
	    dc->res_pool->usb4_dpia_count != 0 &&
	    !dc->debug.dpia_debug.bits.disable_dpia) {
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
		hw_params.dpia_hpd_int_enable_supported = false;
		hw_params.enable_non_transparent_setconfig = dc->config.consolidated_dpia_dp_lt;
		hw_params.disable_dpia_bw_allocation = !dc->config.usb4_bw_alloc_support;
	}

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(4, 2, 0):
		hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
		hw_params.lower_hbr3_phy_ssc = true;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	drm_info(adev_to_drm(adev), "DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	/* Keeping sanity checks off if
	 * DCN31 >= 4.0.59.0
	 * DCN314 >= 8.0.16.0
	 * Otherwise, turn on sanity checks
	 */
	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		if (adev->dm.dmcub_fw_version &&
		    adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
		    adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59))
			adev->dm.dc->debug.sanity_checks = true;
		break;
	case IP_VERSION(3, 1, 4):
		if (adev->dm.dmcub_fw_version &&
		    adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
		    adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16))
			adev->dm.dc->debug.sanity_checks = true;
		break;
	default:
		break;
	}

	return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;
	int r;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		drm_warn(adev_to_drm(adev), "DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		r = dm_dmub_hw_init(adev);
		if (r)
			drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
	}
}
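/*
 * Granularity notes for the address math below, derived from the shifts
 * used here rather than from a register spec: system aperture bounds are
 * tracked in 256KB units (>> 18), the AGP aperture in 16MB units (>> 24),
 * and the GART page table start/end are built from 4KB page numbers
 * before being expanded back to byte addresses.
 */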
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	u64 pt_base;
	u32 logical_addr_low;
	u32 logical_addr_high;
	u32 agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	/* AGP aperture is disabled */
	if (agp_bot > agp_top) {
		logical_addr_low = adev->gmc.fb_start >> 18;
		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
				       AMD_APU_IS_RENOIR |
				       AMD_APU_IS_GREEN_SARDINE))
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
			 * workaround that increases the system aperture high address
			 * (add 1) to get rid of the VM fault and hardware hang.
			 */
			logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
		else
			logical_addr_high = adev->gmc.fb_end >> 18;
	} else {
		logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
				       AMD_APU_IS_RENOIR |
				       AMD_APU_IS_GREEN_SARDINE))
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
			 * workaround that increases the system aperture high address
			 * (add 1) to get rid of the VM fault and hardware hang.
			 */
			logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
		else
			logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
	}

	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
						   AMDGPU_GPU_PAGE_SHIFT);
	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
						  AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
						 AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
						AMDGPU_GPU_PAGE_SHIFT);
	page_table_base.high_part = upper_32_bits(pt_base);
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
}

static void force_connector_state(
	struct amdgpu_dm_connector *aconnector,
	enum drm_connector_force force_state)
{
	struct drm_connector *connector = &aconnector->base;

	mutex_lock(&connector->dev->mode_config.mutex);
	aconnector->base.force = force_state;
	mutex_unlock(&connector->dev->mode_config.mutex);

	mutex_lock(&aconnector->hpd_lock);
	drm_kms_helper_connector_hotplug_event(connector);
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;
	union test_response test_response;

	memset(&test_response, 0, sizeof(test_response));

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;
	adev = offload_work->adev;

	if (!aconnector) {
		drm_err(adev_to_drm(adev), "Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	dc_link = aconnector->dc_link;
	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
		drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
	    offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
		goto skip;
	}

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		dc_link_dp_handle_automated_test(dc_link);

		if (aconnector->timing_changed) {
			/* force connector disconnect and reconnect */
			force_connector_state(aconnector, DRM_FORCE_OFF);
			msleep(100);
			force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
		}

		test_response.bits.ACK = 1;

		core_link_write_dpcd(
			dc_link,
			DP_TEST_RESPONSE,
			&test_response.raw,
			sizeof(test_response));
	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
		   dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
		   dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		/* offload_work->data comes from handle_hpd_rx_irq ->
		 * schedule_hpd_rx_offload_work; this is the deferred handler
		 * for an hpd short pulse. By this point the link status may
		 * have changed, so read the latest link status from the DPCD
		 * registers. If the link status is good, skip running link
		 * training again.
		 */
		union hpd_irq_data irq_data;

		memset(&irq_data, 0, sizeof(irq_data));

		/* before dc_link_dp_handle_link_loss, allow a new link-loss
		 * handling request to be added to the work queue if the link
		 * is lost at the end of dc_link_dp_handle_link_loss
		 */
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);

		if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
		    dc_link_check_link_loss_status(dc_link, &irq_data))
			dc_link_dp_handle_link_loss(dc_link);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kzalloc_objs(*hpd_rx_offload_wq, max_caps);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			drm_err(adev_to_drm(adev), "failed to create amdgpu_dm_hpd_rx_offload_wq!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	for (i = 0; i < max_caps; i++) {
		if (hpd_rx_offload_wq[i].wq)
			destroy_workqueue(hpd_rx_offload_wq[i].wq);
	}
	kfree(hpd_rx_offload_wq);
	return NULL;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

void*
dm_allocate_gpu_mem(
	struct amdgpu_device *adev,
	enum dc_gpu_mem_alloc_type type,
	size_t size,
	long long *addr)
{
	struct dal_allocation *da;
	u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
		AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
	int ret;

	da = kzalloc_obj(struct dal_allocation);
	if (!da)
		return NULL;

	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				      domain, &da->bo,
				      &da->gpu_addr, &da->cpu_ptr);

	*addr = da->gpu_addr;

	if (ret) {
		kfree(da);
		return NULL;
	}

	/* add da to list in dm */
	list_add(&da->list, &adev->dm.da_list);

	return da->cpu_ptr;
}

void
dm_free_gpu_mem(
	struct amdgpu_device *adev,
	enum dc_gpu_mem_alloc_type type,
	void *pvMem)
{
	struct dal_allocation *da;

	/* walk the da list in DM */
	list_for_each_entry(da, &adev->dm.da_list, list) {
		if (pvMem == da->cpu_ptr) {
			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
			list_del(&da->list);
			kfree(da);
			break;
		}
	}
}
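/*
 * The GPINT handshake below works as implemented here: the driver writes
 * the command to the data register with the status bit set; the VBIOS
 * DMUB acknowledges by clearing that status bit in place, so the poll
 * loop compares the readback against the written value with status
 * cleared.
 */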
void *
dm_allocate_gpu_mem(
		struct amdgpu_device *adev,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	struct dal_allocation *da;
	u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
		AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
	int ret;

	da = kzalloc_obj(struct dal_allocation);
	if (!da)
		return NULL;

	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				      domain, &da->bo,
				      &da->gpu_addr, &da->cpu_ptr);
	if (ret) {
		kfree(da);
		return NULL;
	}

	*addr = da->gpu_addr;

	/* add da to list in dm */
	list_add(&da->list, &adev->dm.da_list);

	return da->cpu_ptr;
}

void
dm_free_gpu_mem(
		struct amdgpu_device *adev,
		enum dc_gpu_mem_alloc_type type,
		void *pvMem)
{
	struct dal_allocation *da;

	/* walk the da list in DM */
	list_for_each_entry(da, &adev->dm.da_list, list) {
		if (pvMem == da->cpu_ptr) {
			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
			list_del(&da->list);
			kfree(da);
			break;
		}
	}
}

static enum dmub_status
dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
				 enum dmub_gpint_command command_code,
				 uint16_t param,
				 uint32_t timeout_us)
{
	union dmub_gpint_data_register reg, test;
	uint32_t i;

	/* Assume that VBIOS DMUB is ready to take commands */

	reg.bits.status = 1;
	reg.bits.command_code = command_code;
	reg.bits.param = param;

	cgs_write_register(adev->dm.cgs_device, 0x34c0 + 0x01f8, reg.all);

	for (i = 0; i < timeout_us; ++i) {
		udelay(1);

		/* The DMUB acks the command by clearing the status bit, so
		 * poll until the register reads back our request with
		 * status == 0.
		 */
		reg.bits.status = 0;
		test = (union dmub_gpint_data_register)
			cgs_read_register(adev->dm.cgs_device, 0x34c0 + 0x01f8);

		if (test.all == reg.all)
			return DMUB_STATUS_OK;
	}

	return DMUB_STATUS_TIMEOUT;
}

static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
{
	void *bb;
	long long addr;
	unsigned int bb_size;
	int i = 0;
	uint16_t chunk;
	enum dmub_gpint_command send_addrs[] = {
		DMUB_GPINT__SET_BB_ADDR_WORD0,
		DMUB_GPINT__SET_BB_ADDR_WORD1,
		DMUB_GPINT__SET_BB_ADDR_WORD2,
		DMUB_GPINT__SET_BB_ADDR_WORD3,
	};
	enum dmub_status ret;

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 2, 0):
		bb_size = sizeof(struct dml2_soc_bb);
		break;
	default:
		return NULL;
	}

	bb = dm_allocate_gpu_mem(adev,
				 DC_MEM_ALLOC_TYPE_GART,
				 bb_size,
				 &addr);
	if (!bb)
		return NULL;

	/* Hand the 64-bit GART address to the DMUB as four 16-bit chunks */
	for (i = 0; i < 4; i++) {
		chunk = ((uint64_t) addr >> (i * 16)) & 0xFFFF;
		ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000);
		if (ret != DMUB_STATUS_OK)
			goto free_bb;
	}

	/* Now ask DMUB to copy the bb */
	ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000);
	if (ret != DMUB_STATUS_OK)
		goto free_bb;

	return bb;

free_bb:
	dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb);
	return NULL;
}
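
/*
 * Pick the default IPS (Idle Power States) policy for this ASIC. Roughly:
 * DMUB_IPS_ENABLE lets DMUB use every idle state, DMUB_IPS_DISABLE_ALL turns
 * the feature off entirely, and DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF limits
 * idle entry to root clock gating (RCG) while displays are active, allowing
 * IPS2 only once all displays are off. The precise power-state semantics are
 * implemented on the DMUB firmware side.
 */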
static enum dmub_ips_disable_type dm_get_default_ips_mode(
	struct amdgpu_device *adev)
{
	enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE;

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 6, 0):
		ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
		break;
	case IP_VERSION(4, 2, 0):
		ret = DMUB_IPS_DISABLE_ALL;
		break;
	default:
		/* ASICs older than DCN35 do not have IPS */
		if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0))
			ret = DMUB_IPS_DISABLE_ALL;
		break;
	}

	return ret;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
	struct dc_callback_init init_params;
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
	memset(&init_params, 0, sizeof(init_params));

	mutex_init(&adev->dm.dpia_aux_lock);
	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		drm_err(adev_to_drm(adev), "failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	/* cgs_device was created in dm_sw_init() */
	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(2, 1, 0):
		switch (adev->dm.dmcub_fw_version) {
		case 0: /* development */
		case 0x1: /* linux-firmware.git hash 6d9f399 */
		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
			init_data.flags.disable_dmcu = false;
			break;
		default:
			init_data.flags.disable_dmcu = true;
		}
		break;
	case IP_VERSION(2, 0, 3):
		init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	/* APUs support S/G display by default, except for:
	 * - ASICs before Carrizo,
	 * - RAVEN1 (users reported stability issues)
	 */
	if (adev->asic_type < CHIP_CARRIZO) {
		init_data.flags.gpu_vm_support = false;
	} else if (adev->asic_type == CHIP_RAVEN) {
		if (adev->apu_flags & AMD_APU_IS_RAVEN)
			init_data.flags.gpu_vm_support = false;
		else
			init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
	} else {
		if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3))
			init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1);
		else
			init_data.flags.gpu_vm_support =
				(amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
	}

	adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;
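
	/*
	 * LTTPR = Link-Training Tunable PHY Repeater (DP 1.4a and newer).
	 * These feature-mask bits allow DC to drive the repeaters in
	 * non-transparent mode, i.e. train each repeater hop explicitly
	 * instead of having the LTTPRs forward link training untouched.
	 */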
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;

	init_data.flags.seamless_boot_edp_requested = false;

	if (amdgpu_device_seamless_boot_supported(adev)) {
		init_data.flags.seamless_boot_edp_requested = true;
		init_data.flags.allow_seamless_boot_optimization = true;
		drm_dbg(adev->dm.ddev, "Seamless boot requested\n");
	}

	init_data.flags.enable_mipi_converter_optimization = true;

	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
	init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];

	if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
		init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
	else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC)
		init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC;
	else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC)
		init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
	else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
		init_data.flags.disable_ips = DMUB_IPS_ENABLE;
	else
		init_data.flags.disable_ips = dm_get_default_ips_mode(adev);

	init_data.flags.disable_ips_in_vpb = 0;

	/* DCN35 and above support dynamic DTBCLK switch */
	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
		init_data.flags.allow_0_dtb_clk = true;

	/* Enable DWB for tested platforms only */
	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
		init_data.num_virtual_links = 1;

	/* On DCN42 and above, DPIA switches to the unified link training path */
	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 2, 0)) {
		init_data.flags.consolidated_dpia_dp_lt = true;
		init_data.flags.enable_dpia_pre_training = true;
		init_data.flags.unify_link_enc_assignment = true;
		init_data.flags.usb4_bw_alloc_support = true;
	}

	retrieve_dmi_info(&adev->dm);
	if (adev->dm.edp0_on_dp1_quirk)
		init_data.flags.support_edp0_on_dp1 = true;

	if (adev->dm.bb_from_dmub)
		init_data.bb_from_dmub = adev->dm.bb_from_dmub;
	else
		init_data.bb_from_dmub = NULL;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		drm_info(adev_to_drm(adev), "Display Core v%s initialized on %s\n", DC_VER,
			 dce_version_to_string(adev->dm.dc->ctx->dce_version));
	} else {
		drm_err(adev_to_drm(adev), "Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ?
false : true; 2049 if (dm_should_disable_stutter(adev->pdev)) 2050 adev->dm.dc->debug.disable_stutter = true; 2051 2052 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) 2053 adev->dm.dc->debug.disable_stutter = true; 2054 2055 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) 2056 adev->dm.dc->debug.disable_dsc = true; 2057 2058 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) 2059 adev->dm.dc->debug.disable_clock_gate = true; 2060 2061 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH) 2062 adev->dm.dc->debug.force_subvp_mclk_switch = true; 2063 2064 if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP_FAMS) { 2065 adev->dm.dc->debug.force_disable_subvp = true; 2066 adev->dm.dc->debug.fams2_config.bits.enable = false; 2067 } 2068 2069 if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) { 2070 adev->dm.dc->debug.using_dml2 = true; 2071 adev->dm.dc->debug.using_dml21 = true; 2072 } 2073 2074 if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE) 2075 adev->dm.dc->debug.hdcp_lc_force_fw_enable = true; 2076 2077 if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK) 2078 adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true; 2079 2080 if (amdgpu_dc_debug_mask & DC_SKIP_DETECTION_LT) 2081 adev->dm.dc->debug.skip_detection_link_training = true; 2082 2083 adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; 2084 2085 /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ 2086 adev->dm.dc->debug.ignore_cable_id = true; 2087 2088 if (adev->dm.dc->caps.dp_hdmi21_pcon_support) 2089 drm_info(adev_to_drm(adev), "DP-HDMI FRL PCON supported\n"); 2090 2091 r = dm_dmub_hw_init(adev); 2092 if (r) { 2093 drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r); 2094 goto error; 2095 } 2096 2097 dc_hardware_init(adev->dm.dc); 2098 2099 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev); 2100 if (!adev->dm.hpd_rx_offload_wq) { 2101 drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n"); 2102 goto error; 2103 } 2104 2105 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { 2106 struct dc_phy_addr_space_config pa_config; 2107 2108 mmhub_read_system_context(adev, &pa_config); 2109 2110 // Call the DC init_memory func 2111 dc_setup_system_context(adev->dm.dc, &pa_config); 2112 } 2113 2114 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); 2115 if (!adev->dm.freesync_module) { 2116 drm_err(adev_to_drm(adev), 2117 "failed to initialize freesync_module.\n"); 2118 } else 2119 drm_dbg_driver(adev_to_drm(adev), "freesync_module init done %p.\n", 2120 adev->dm.freesync_module); 2121 2122 amdgpu_dm_init_color_mod(); 2123 2124 if (adev->dm.dc->caps.max_links > 0) { 2125 adev->dm.vblank_control_workqueue = 2126 create_singlethread_workqueue("dm_vblank_control_workqueue"); 2127 if (!adev->dm.vblank_control_workqueue) 2128 drm_err(adev_to_drm(adev), "failed to initialize vblank_workqueue.\n"); 2129 } 2130 2131 if (adev->dm.dc->caps.ips_support && 2132 adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) 2133 adev->dm.idle_workqueue = idle_create_workqueue(adev); 2134 2135 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { 2136 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); 2137 2138 if (!adev->dm.hdcp_workqueue) 2139 drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n"); 2140 else 2141 drm_dbg_driver(adev_to_drm(adev), 2142 "hdcp_workqueue init done %p.\n", 2143 adev->dm.hdcp_workqueue); 2144 2145 dc_init_callbacks(adev->dm.dc, &init_params); 
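
		/*
		 * init_params.cp_psp was filled in by hdcp_create_workqueue()
		 * above; the dc_init_callbacks() call above presumably hands
		 * those content-protection hooks over to DC so it can reach
		 * back into the HDCP workqueue as streams come and go.
		 */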
	}

	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc_obj(struct dmub_notification);
		if (!adev->dm.dmub_notify) {
			drm_err(adev_to_drm(adev), "failed to allocate adev->dm.dmub_notify");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			drm_err(adev_to_drm(adev), "failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
						   dmub_aux_setconfig_callback, false)) {
			drm_err(adev_to_drm(adev), "failed to register dmub aux callback");
			goto error;
		}

		for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++)
			init_completion(&adev->dm.fused_io[i].replied);

		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO,
						   dmub_aux_fused_io_callback, false)) {
			drm_err(adev_to_drm(adev), "failed to register dmub fused io callback");
			goto error;
		}

		/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
		 * It is expected that DMUB will resend any pending notifications at this point. Note
		 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
		 * align with the legacy interface initialization sequence. Connection status will be
		 * proactively detected once in amdgpu_dm_initialize_drm_device().
		 */
		dc_enable_dmub_outbox(adev->dm.dc);

		/* DPIA trace goes to dmesg logs only if outbox is enabled */
		if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
			dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		drm_err(adev_to_drm(adev),
			"failed to initialize sw for display support.\n");
		goto error;
	}

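	/*
	 * MST streams are not tied 1:1 to physical encoders, so the DM keeps
	 * a small pool of internal "fake" encoders (roughly one per display
	 * pipe) that MST connectors can attach to for atomic state tracking.
	 */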
	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		drm_err(adev_to_drm(adev),
			"failed to initialize vblank for display support.\n");
		goto error;
	}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	amdgpu_dm_crtc_secure_display_create_contexts(adev);
	if (!adev->dm.secure_display_ctx.crtc_ctx)
		drm_err(adev_to_drm(adev), "failed to initialize secure display contexts.\n");

	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
		adev->dm.secure_display_ctx.support_mul_roi = true;
#endif

	drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;
	}

	if (adev->dm.idle_workqueue) {
		if (adev->dm.idle_workqueue->running) {
			adev->dm.idle_workqueue->enable = false;
			flush_work(&adev->dm.idle_workqueue->work);
		}

		kfree(adev->dm.idle_workqueue);
		adev->dm.idle_workqueue = NULL;
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.secure_display_ctx.crtc_ctx) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (adev->dm.secure_display_ctx.crtc_ctx[i].crtc) {
				flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].notify_ta_work);
				flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].forward_roi_work);
			}
		}
		kfree(adev->dm.secure_display_ctx.crtc_ctx);
		adev->dm.secure_display_ctx.crtc_ctx = NULL;
	}
#endif
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc) {
		dc_deinit_callbacks(adev->dm.dc);
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		if (dc_enable_dmub_notifications(adev->dm.dc)) {
			kfree(adev->dm.dmub_notify);
			adev->dm.dmub_notify = NULL;
			destroy_workqueue(adev->dm.delayed_hpd_wq);
			adev->dm.delayed_hpd_wq = NULL;
		}
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
			if (adev->dm.hpd_rx_offload_wq[i].wq) {
				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
			}
		}

		kfree(adev->dm.hpd_rx_offload_wq);
		adev->dm.hpd_rx_offload_wq = NULL;
	}

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
adev->dm.cgs_device = NULL; 2316 } 2317 if (adev->dm.freesync_module) { 2318 mod_freesync_destroy(adev->dm.freesync_module); 2319 adev->dm.freesync_module = NULL; 2320 } 2321 2322 mutex_destroy(&adev->dm.audio_lock); 2323 mutex_destroy(&adev->dm.dc_lock); 2324 mutex_destroy(&adev->dm.dpia_aux_lock); 2325 } 2326 2327 static int load_dmcu_fw(struct amdgpu_device *adev) 2328 { 2329 const char *fw_name_dmcu = NULL; 2330 int r; 2331 const struct dmcu_firmware_header_v1_0 *hdr; 2332 2333 switch (adev->asic_type) { 2334 #if defined(CONFIG_DRM_AMD_DC_SI) 2335 case CHIP_TAHITI: 2336 case CHIP_PITCAIRN: 2337 case CHIP_VERDE: 2338 case CHIP_OLAND: 2339 #endif 2340 case CHIP_BONAIRE: 2341 case CHIP_HAWAII: 2342 case CHIP_KAVERI: 2343 case CHIP_KABINI: 2344 case CHIP_MULLINS: 2345 case CHIP_TONGA: 2346 case CHIP_FIJI: 2347 case CHIP_CARRIZO: 2348 case CHIP_STONEY: 2349 case CHIP_POLARIS11: 2350 case CHIP_POLARIS10: 2351 case CHIP_POLARIS12: 2352 case CHIP_VEGAM: 2353 case CHIP_VEGA10: 2354 case CHIP_VEGA12: 2355 case CHIP_VEGA20: 2356 return 0; 2357 case CHIP_NAVI12: 2358 fw_name_dmcu = FIRMWARE_NAVI12_DMCU; 2359 break; 2360 case CHIP_RAVEN: 2361 if (ASICREV_IS_PICASSO(adev->external_rev_id)) 2362 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 2363 else if (ASICREV_IS_RAVEN2(adev->external_rev_id)) 2364 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 2365 else 2366 return 0; 2367 break; 2368 default: 2369 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2370 case IP_VERSION(2, 0, 2): 2371 case IP_VERSION(2, 0, 3): 2372 case IP_VERSION(2, 0, 0): 2373 case IP_VERSION(2, 1, 0): 2374 case IP_VERSION(3, 0, 0): 2375 case IP_VERSION(3, 0, 2): 2376 case IP_VERSION(3, 0, 3): 2377 case IP_VERSION(3, 0, 1): 2378 case IP_VERSION(3, 1, 2): 2379 case IP_VERSION(3, 1, 3): 2380 case IP_VERSION(3, 1, 4): 2381 case IP_VERSION(3, 1, 5): 2382 case IP_VERSION(3, 1, 6): 2383 case IP_VERSION(3, 2, 0): 2384 case IP_VERSION(3, 2, 1): 2385 case IP_VERSION(3, 5, 0): 2386 case IP_VERSION(3, 5, 1): 2387 case IP_VERSION(3, 6, 0): 2388 case IP_VERSION(4, 0, 1): 2389 case IP_VERSION(4, 2, 0): 2390 return 0; 2391 default: 2392 break; 2393 } 2394 drm_err(adev_to_drm(adev), "Unsupported ASIC type: 0x%X\n", adev->asic_type); 2395 return -EINVAL; 2396 } 2397 2398 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 2399 drm_dbg_kms(adev_to_drm(adev), "dm: DMCU firmware not supported on direct or SMU loading\n"); 2400 return 0; 2401 } 2402 2403 r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, AMDGPU_UCODE_REQUIRED, 2404 "%s", fw_name_dmcu); 2405 if (r == -ENODEV) { 2406 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ 2407 drm_dbg_kms(adev_to_drm(adev), "dm: DMCU firmware not found\n"); 2408 adev->dm.fw_dmcu = NULL; 2409 return 0; 2410 } 2411 if (r) { 2412 drm_err(adev_to_drm(adev), "amdgpu_dm: Can't validate firmware \"%s\"\n", 2413 fw_name_dmcu); 2414 amdgpu_ucode_release(&adev->dm.fw_dmcu); 2415 return r; 2416 } 2417 2418 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; 2419 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; 2420 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; 2421 adev->firmware.fw_size += 2422 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 2423 2424 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; 2425 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; 2426 adev->firmware.fw_size += 2427 ALIGN(le32_to_cpu(hdr->intv_size_bytes), 
PAGE_SIZE); 2428 2429 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version); 2430 2431 drm_dbg_kms(adev_to_drm(adev), "PSP loading DMCU firmware\n"); 2432 2433 return 0; 2434 } 2435 2436 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) 2437 { 2438 struct amdgpu_device *adev = ctx; 2439 2440 return dm_read_reg(adev->dm.dc->ctx, address); 2441 } 2442 2443 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, 2444 uint32_t value) 2445 { 2446 struct amdgpu_device *adev = ctx; 2447 2448 return dm_write_reg(adev->dm.dc->ctx, address, value); 2449 } 2450 2451 static int dm_dmub_sw_init(struct amdgpu_device *adev) 2452 { 2453 struct dmub_srv_create_params create_params; 2454 struct dmub_srv_fw_meta_info_params fw_meta_info_params; 2455 struct dmub_srv_region_params region_params; 2456 struct dmub_srv_region_info region_info; 2457 struct dmub_srv_memory_params memory_params; 2458 struct dmub_fw_meta_info fw_info; 2459 struct dmub_srv_fb_info *fb_info; 2460 struct dmub_srv *dmub_srv; 2461 const struct dmcub_firmware_header_v1_0 *hdr; 2462 enum dmub_asic dmub_asic; 2463 enum dmub_status status; 2464 static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = { 2465 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST 2466 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK 2467 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA 2468 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS 2469 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX 2470 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF 2471 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE 2472 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM 2473 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM 2474 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE 2475 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_LSDMA_BUFFER 2476 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_CURSOR_OFFLOAD 2477 }; 2478 int r; 2479 2480 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2481 case IP_VERSION(2, 1, 0): 2482 dmub_asic = DMUB_ASIC_DCN21; 2483 break; 2484 case IP_VERSION(3, 0, 0): 2485 dmub_asic = DMUB_ASIC_DCN30; 2486 break; 2487 case IP_VERSION(3, 0, 1): 2488 dmub_asic = DMUB_ASIC_DCN301; 2489 break; 2490 case IP_VERSION(3, 0, 2): 2491 dmub_asic = DMUB_ASIC_DCN302; 2492 break; 2493 case IP_VERSION(3, 0, 3): 2494 dmub_asic = DMUB_ASIC_DCN303; 2495 break; 2496 case IP_VERSION(3, 1, 2): 2497 case IP_VERSION(3, 1, 3): 2498 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; 2499 break; 2500 case IP_VERSION(3, 1, 4): 2501 dmub_asic = DMUB_ASIC_DCN314; 2502 break; 2503 case IP_VERSION(3, 1, 5): 2504 dmub_asic = DMUB_ASIC_DCN315; 2505 break; 2506 case IP_VERSION(3, 1, 6): 2507 dmub_asic = DMUB_ASIC_DCN316; 2508 break; 2509 case IP_VERSION(3, 2, 0): 2510 dmub_asic = DMUB_ASIC_DCN32; 2511 break; 2512 case IP_VERSION(3, 2, 1): 2513 dmub_asic = DMUB_ASIC_DCN321; 2514 break; 2515 case IP_VERSION(3, 5, 0): 2516 case IP_VERSION(3, 5, 1): 2517 dmub_asic = DMUB_ASIC_DCN35; 2518 break; 2519 case IP_VERSION(3, 6, 0): 2520 dmub_asic = DMUB_ASIC_DCN36; 2521 break; 2522 case IP_VERSION(4, 0, 1): 2523 dmub_asic = DMUB_ASIC_DCN401; 2524 break; 2525 case IP_VERSION(4, 2, 0): 2526 dmub_asic = DMUB_ASIC_DCN42; 2527 break; 2528 default: 2529 /* ASIC doesn't support DMUB. 
		 */
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		drm_info(adev_to_drm(adev), "Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc_obj(*adev->dm.dmub_srv);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		drm_err(adev_to_drm(adev), "Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		drm_err(adev_to_drm(adev), "Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Extract the FW meta info. */
	memset(&fw_meta_info_params, 0, sizeof(fw_meta_info_params));

	fw_meta_info_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
		PSP_HEADER_BYTES_256;
	fw_meta_info_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	fw_meta_info_params.fw_inst_const = adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES_256;
	fw_meta_info_params.fw_bss_data = fw_meta_info_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	fw_meta_info_params.custom_psp_footer_size = 0;

	status = dmub_srv_get_fw_meta_info_from_raw_fw(&fw_meta_info_params, &fw_info);
	if (status != DMUB_STATUS_OK) {
		/* Log the error but don't return early; the region
		 * calculation below can still proceed without the FW meta
		 * info.
		 */
		drm_err(adev_to_drm(adev), "Error getting DMUB FW meta info: %d\n", status);
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = fw_meta_info_params.inst_const_size;
	region_params.bss_data_size = fw_meta_info_params.bss_data_size;
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = fw_meta_info_params.fw_bss_data;
	region_params.fw_inst_const = fw_meta_info_params.fw_inst_const;
	region_params.window_memory_type = window_memory_type;
	region_params.fw_info = (status == DMUB_STATUS_OK) ? &fw_info : NULL;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);
	if (status != DMUB_STATUS_OK) {
		drm_err(adev_to_drm(adev), "Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&memory_params, 0, sizeof(memory_params));
	memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
	memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
	memory_params.region_info = &region_info;
	memory_params.window_memory_type = window_memory_type;

	adev->dm.dmub_fb_info = kzalloc_obj(*adev->dm.dmub_fb_info);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		drm_err(adev_to_drm(adev),
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		drm_err(adev_to_drm(adev), "Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	adev->dm.bb_from_dmub = dm_dmub_get_vbios_bounding_box(adev);
	adev->dm.fw_inst_size = fw_meta_info_params.inst_const_size;

	return 0;
}

static int dm_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		drm_err(adev_to_drm(adev), "failed to create cgs device.\n");
		return -EINVAL;
	}

	/* Moved from dm init since we need to use allocations for storing bounding box data */
	INIT_LIST_HEAD(&adev->dm.da_list);

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct dal_allocation *da;

	list_for_each_entry(da, &adev->dm.da_list, list) {
		if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) {
			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
			list_del(&da->list);
			kfree(da);
			adev->dm.bb_from_dmub = NULL;
			break;
		}
	}

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		kfree(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	amdgpu_ucode_release(&adev->dm.dmub_fw);
	amdgpu_ucode_release(&adev->dm.fw_dmcu);

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			drm_dbg_kms(dev, "DM_MST: starting TM on aconnector: %p [id: %d]\n",
				    aconnector,
				    aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				drm_err(dev, "DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				ret =
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, 2730 aconnector->dc_link); 2731 break; 2732 } 2733 } 2734 } 2735 drm_connector_list_iter_end(&iter); 2736 2737 return ret; 2738 } 2739 2740 static int dm_late_init(struct amdgpu_ip_block *ip_block) 2741 { 2742 struct amdgpu_device *adev = ip_block->adev; 2743 2744 struct dmcu_iram_parameters params; 2745 unsigned int linear_lut[16]; 2746 int i; 2747 struct dmcu *dmcu = NULL; 2748 2749 dmcu = adev->dm.dc->res_pool->dmcu; 2750 2751 for (i = 0; i < 16; i++) 2752 linear_lut[i] = 0xFFFF * i / 15; 2753 2754 params.set = 0; 2755 params.backlight_ramping_override = false; 2756 params.backlight_ramping_start = 0xCCCC; 2757 params.backlight_ramping_reduction = 0xCCCCCCCC; 2758 params.backlight_lut_array_size = 16; 2759 params.backlight_lut_array = linear_lut; 2760 2761 /* Min backlight level after ABM reduction, Don't allow below 1% 2762 * 0xFFFF x 0.01 = 0x28F 2763 */ 2764 params.min_abm_backlight = 0x28F; 2765 /* In the case where abm is implemented on dmcub, 2766 * dmcu object will be null. 2767 * ABM 2.4 and up are implemented on dmcub. 2768 */ 2769 if (dmcu) { 2770 if (!dmcu_load_iram(dmcu, params)) 2771 return -EINVAL; 2772 } else if (adev->dm.dc->ctx->dmub_srv) { 2773 struct dc_link *edp_links[MAX_NUM_EDP]; 2774 int edp_num; 2775 2776 dc_get_edp_links(adev->dm.dc, edp_links, &edp_num); 2777 for (i = 0; i < edp_num; i++) { 2778 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i)) 2779 return -EINVAL; 2780 } 2781 } 2782 2783 return detect_mst_link_for_all_connectors(adev_to_drm(adev)); 2784 } 2785 2786 static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr) 2787 { 2788 u8 buf[UUID_SIZE]; 2789 guid_t guid; 2790 int ret; 2791 2792 mutex_lock(&mgr->lock); 2793 if (!mgr->mst_primary) 2794 goto out_fail; 2795 2796 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { 2797 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2798 goto out_fail; 2799 } 2800 2801 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2802 DP_MST_EN | 2803 DP_UP_REQ_EN | 2804 DP_UPSTREAM_IS_SRC); 2805 if (ret < 0) { 2806 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); 2807 goto out_fail; 2808 } 2809 2810 /* Some hubs forget their guids after they resume */ 2811 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf)); 2812 if (ret != sizeof(buf)) { 2813 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2814 goto out_fail; 2815 } 2816 2817 import_guid(&guid, buf); 2818 2819 if (guid_is_null(&guid)) { 2820 guid_gen(&guid); 2821 export_guid(buf, &guid); 2822 2823 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, buf, sizeof(buf)); 2824 2825 if (ret != sizeof(buf)) { 2826 drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n"); 2827 goto out_fail; 2828 } 2829 } 2830 2831 guid_copy(&mgr->mst_primary->guid, &guid); 2832 2833 out_fail: 2834 mutex_unlock(&mgr->lock); 2835 } 2836 2837 void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector) 2838 { 2839 struct cec_notifier *n = aconnector->notifier; 2840 2841 if (!n) 2842 return; 2843 2844 cec_notifier_phys_addr_invalidate(n); 2845 } 2846 2847 void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector) 2848 { 2849 struct drm_connector *connector = &aconnector->base; 2850 struct cec_notifier *n = aconnector->notifier; 2851 2852 if (!n) 2853 return; 2854 2855 cec_notifier_set_phys_addr(n, 2856 connector->display_info.source_physical_address); 2857 } 2858 2859 static void s3_handle_hdmi_cec(struct drm_device *ddev, 
			       bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(ddev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (suspend)
			hdmi_cec_unset_edid(aconnector);
		else
			hdmi_cec_set_edid(aconnector);
	}
	drm_connector_list_iter_end(&conn_iter);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_root)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			/* if extended timeout is supported in hardware,
			 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
			 * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
			 */
			try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
			if (!dp_is_lttpr_present(aconnector->dc_link))
				try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);

			/* TODO: move resume_mst_branch_status() into drm mst resume again
			 * once topology probing work is pulled out from mst resume into mst
			 * resume 2nd step. mst resume 2nd step should be called after old
			 * state getting restored (i.e. drm_atomic_helper_resume()).
			 */
			resume_mst_branch_status(mgr);
		}
	}
	drm_connector_list_iter_end(&iter);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	int ret = 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed and
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, and then calls the pplib
	 * functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(2, 0, 2):
	case IP_VERSION(2, 0, 0):
		break;
	default:
		return 0;
	}

	ret = amdgpu_dpm_write_watermarks_table(adev);
	if (ret) {
		drm_err(adev_to_drm(adev), "Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_i2c_adapter *oem_i2c;
	struct ddc_service *oem_ddc_service;
	int r;

	oem_ddc_service = dc_get_oem_i2c_device(adev->dm.dc);
	if (oem_ddc_service) {
		oem_i2c = create_i2c(oem_ddc_service, true);
		if (!oem_i2c) {
			drm_info(adev_to_drm(adev), "Failed to create oem i2c adapter data\n");
			return -ENOMEM;
		}

		r = devm_i2c_add_adapter(adev->dev, &oem_i2c->base);
		if (r) {
			drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
			kfree(oem_i2c);
			return r;
		}
		dm->oem_i2c = oem_i2c;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	/* Create DAL display manager */
	r = amdgpu_dm_init(adev);
	if (r)
		return r;
	amdgpu_dm_hpd_init(adev);

	r = dm_oem_i2c_hw_init(adev);
	if (r)
		drm_info(adev_to_drm(adev), "Failed to add OEM i2c bus\n");

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
3044 */ 3045 static int dm_hw_fini(struct amdgpu_ip_block *ip_block) 3046 { 3047 struct amdgpu_device *adev = ip_block->adev; 3048 3049 amdgpu_dm_hpd_fini(adev); 3050 3051 amdgpu_dm_irq_fini(adev); 3052 amdgpu_dm_fini(adev); 3053 return 0; 3054 } 3055 3056 3057 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, 3058 struct dc_state *state, bool enable) 3059 { 3060 enum dc_irq_source irq_source; 3061 struct amdgpu_crtc *acrtc; 3062 int rc = -EBUSY; 3063 int i = 0; 3064 3065 for (i = 0; i < state->stream_count; i++) { 3066 acrtc = get_crtc_by_otg_inst( 3067 adev, state->stream_status[i].primary_otg_inst); 3068 3069 if (acrtc && state->stream_status[i].plane_count != 0) { 3070 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; 3071 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; 3072 if (rc) 3073 drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n", 3074 enable ? "enable" : "disable"); 3075 3076 if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) { 3077 if (enable) { 3078 if (amdgpu_dm_crtc_vrr_active( 3079 to_dm_crtc_state(acrtc->base.state))) 3080 rc = amdgpu_dm_crtc_set_vupdate_irq( 3081 &acrtc->base, true); 3082 } else 3083 rc = amdgpu_dm_crtc_set_vupdate_irq( 3084 &acrtc->base, false); 3085 3086 if (rc) 3087 drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", 3088 enable ? "en" : "dis"); 3089 } 3090 3091 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; 3092 /* During gpu-reset we disable and then enable vblank irq, so 3093 * don't use amdgpu_irq_get/put() to avoid refcount change. 3094 */ 3095 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) 3096 drm_warn(adev_to_drm(adev), "Failed to %sable vblank interrupt\n", enable ? "en" : "dis"); 3097 } 3098 } 3099 3100 } 3101 3102 DEFINE_FREE(state_release, struct dc_state *, if (_T) dc_state_release(_T)) 3103 3104 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) 3105 { 3106 struct dc_state *context __free(state_release) = NULL; 3107 int i; 3108 struct dc_stream_state *del_streams[MAX_PIPES]; 3109 int del_streams_count = 0; 3110 struct dc_commit_streams_params params = {}; 3111 3112 memset(del_streams, 0, sizeof(del_streams)); 3113 3114 context = dc_state_create_current_copy(dc); 3115 if (context == NULL) 3116 return DC_ERROR_UNEXPECTED; 3117 3118 /* First remove from context all streams */ 3119 for (i = 0; i < context->stream_count; i++) { 3120 struct dc_stream_state *stream = context->streams[i]; 3121 3122 del_streams[del_streams_count++] = stream; 3123 } 3124 3125 /* Remove all planes for removed streams and then remove the streams */ 3126 for (i = 0; i < del_streams_count; i++) { 3127 enum dc_status res; 3128 3129 if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) 3130 return DC_FAIL_DETACH_SURFACES; 3131 3132 res = dc_state_remove_stream(dc, context, del_streams[i]); 3133 if (res != DC_OK) 3134 return res; 3135 } 3136 3137 params.streams = context->streams; 3138 params.stream_count = context->stream_count; 3139 3140 return dc_commit_streams(dc, ¶ms); 3141 } 3142 3143 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) 3144 { 3145 int i; 3146 3147 if (dm->hpd_rx_offload_wq) { 3148 for (i = 0; i < dm->dc->caps.max_links; i++) 3149 flush_workqueue(dm->hpd_rx_offload_wq[i].wq); 3150 } 3151 } 3152 3153 static int dm_cache_state(struct amdgpu_device *adev) 3154 { 3155 int r; 3156 3157 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); 3158 if (IS_ERR(adev->dm.cached_state)) { 3159 r = 
PTR_ERR(adev->dm.cached_state); 3160 adev->dm.cached_state = NULL; 3161 } 3162 3163 return adev->dm.cached_state ? 0 : r; 3164 } 3165 3166 static void dm_destroy_cached_state(struct amdgpu_device *adev) 3167 { 3168 struct amdgpu_display_manager *dm = &adev->dm; 3169 struct drm_device *ddev = adev_to_drm(adev); 3170 struct dm_plane_state *dm_new_plane_state; 3171 struct drm_plane_state *new_plane_state; 3172 struct dm_crtc_state *dm_new_crtc_state; 3173 struct drm_crtc_state *new_crtc_state; 3174 struct drm_plane *plane; 3175 struct drm_crtc *crtc; 3176 int i; 3177 3178 if (!dm->cached_state) 3179 return; 3180 3181 /* Force mode set in atomic commit */ 3182 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 3183 new_crtc_state->active_changed = true; 3184 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 3185 reset_freesync_config_for_crtc(dm_new_crtc_state); 3186 } 3187 3188 /* 3189 * atomic_check is expected to create the dc states. We need to release 3190 * them here, since they were duplicated as part of the suspend 3191 * procedure. 3192 */ 3193 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 3194 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 3195 if (dm_new_crtc_state->stream) { 3196 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); 3197 dc_stream_release(dm_new_crtc_state->stream); 3198 dm_new_crtc_state->stream = NULL; 3199 } 3200 dm_new_crtc_state->base.color_mgmt_changed = true; 3201 } 3202 3203 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { 3204 dm_new_plane_state = to_dm_plane_state(new_plane_state); 3205 if (dm_new_plane_state->dc_state) { 3206 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); 3207 dc_plane_state_release(dm_new_plane_state->dc_state); 3208 dm_new_plane_state->dc_state = NULL; 3209 } 3210 } 3211 3212 drm_atomic_helper_resume(ddev, dm->cached_state); 3213 3214 dm->cached_state = NULL; 3215 } 3216 3217 static int dm_suspend(struct amdgpu_ip_block *ip_block) 3218 { 3219 struct amdgpu_device *adev = ip_block->adev; 3220 struct amdgpu_display_manager *dm = &adev->dm; 3221 3222 if (amdgpu_in_reset(adev)) { 3223 enum dc_status res; 3224 3225 mutex_lock(&dm->dc_lock); 3226 3227 dc_allow_idle_optimizations(adev->dm.dc, false); 3228 3229 dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state); 3230 3231 if (dm->cached_dc_state) 3232 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); 3233 3234 res = amdgpu_dm_commit_zero_streams(dm->dc); 3235 if (res != DC_OK) { 3236 drm_err(adev_to_drm(adev), "Failed to commit zero streams: %d\n", res); 3237 return -EINVAL; 3238 } 3239 3240 amdgpu_dm_irq_suspend(adev); 3241 3242 hpd_rx_irq_work_suspend(dm); 3243 3244 return 0; 3245 } 3246 3247 if (!adev->dm.cached_state) { 3248 int r = dm_cache_state(adev); 3249 3250 if (r) 3251 return r; 3252 } 3253 3254 s3_handle_hdmi_cec(adev_to_drm(adev), true); 3255 3256 s3_handle_mst(adev_to_drm(adev), true); 3257 3258 amdgpu_dm_irq_suspend(adev); 3259 3260 hpd_rx_irq_work_suspend(dm); 3261 3262 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); 3263 3264 if (dm->dc->caps.ips_support && adev->in_s0ix) 3265 dc_allow_idle_optimizations(dm->dc, true); 3266 3267 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); 3268 3269 return 0; 3270 } 3271 3272 struct drm_connector * 3273 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, 3274 struct drm_crtc *crtc) 3275 { 3276 u32 i; 3277 struct drm_connector_state 
*new_con_state; 3278 struct drm_connector *connector; 3279 struct drm_crtc *crtc_from_state; 3280 3281 for_each_new_connector_in_state(state, connector, new_con_state, i) { 3282 crtc_from_state = new_con_state->crtc; 3283 3284 if (crtc_from_state == crtc) 3285 return connector; 3286 } 3287 3288 return NULL; 3289 } 3290 3291 static void emulated_link_detect(struct dc_link *link) 3292 { 3293 struct dc_sink_init_data sink_init_data = { 0 }; 3294 struct display_sink_capability sink_caps = { 0 }; 3295 enum dc_edid_status edid_status; 3296 struct dc_context *dc_ctx = link->ctx; 3297 struct drm_device *dev = adev_to_drm(dc_ctx->driver_context); 3298 struct dc_sink *sink = NULL; 3299 struct dc_sink *prev_sink = NULL; 3300 3301 link->type = dc_connection_none; 3302 prev_sink = link->local_sink; 3303 3304 if (prev_sink) 3305 dc_sink_release(prev_sink); 3306 3307 switch (link->connector_signal) { 3308 case SIGNAL_TYPE_HDMI_TYPE_A: { 3309 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3310 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; 3311 break; 3312 } 3313 3314 case SIGNAL_TYPE_DVI_SINGLE_LINK: { 3315 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3316 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; 3317 break; 3318 } 3319 3320 case SIGNAL_TYPE_DVI_DUAL_LINK: { 3321 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3322 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; 3323 break; 3324 } 3325 3326 case SIGNAL_TYPE_LVDS: { 3327 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3328 sink_caps.signal = SIGNAL_TYPE_LVDS; 3329 break; 3330 } 3331 3332 case SIGNAL_TYPE_EDP: { 3333 sink_caps.transaction_type = 3334 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 3335 sink_caps.signal = SIGNAL_TYPE_EDP; 3336 break; 3337 } 3338 3339 case SIGNAL_TYPE_DISPLAY_PORT: { 3340 sink_caps.transaction_type = 3341 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 3342 sink_caps.signal = SIGNAL_TYPE_VIRTUAL; 3343 break; 3344 } 3345 3346 default: 3347 drm_err(dev, "Invalid connector type! 
signal:%d\n", 3348 link->connector_signal); 3349 return; 3350 } 3351 3352 sink_init_data.link = link; 3353 sink_init_data.sink_signal = sink_caps.signal; 3354 3355 sink = dc_sink_create(&sink_init_data); 3356 if (!sink) { 3357 drm_err(dev, "Failed to create sink!\n"); 3358 return; 3359 } 3360 3361 /* dc_sink_create returns a new reference */ 3362 link->local_sink = sink; 3363 3364 edid_status = dm_helpers_read_local_edid( 3365 link->ctx, 3366 link, 3367 sink); 3368 3369 if (edid_status != EDID_OK) 3370 drm_err(dev, "Failed to read EDID\n"); 3371 3372 } 3373 3374 static void dm_gpureset_commit_state(struct dc_state *dc_state, 3375 struct amdgpu_display_manager *dm) 3376 { 3377 struct { 3378 struct dc_surface_update surface_updates[MAX_SURFACES]; 3379 struct dc_plane_info plane_infos[MAX_SURFACES]; 3380 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 3381 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 3382 struct dc_stream_update stream_update; 3383 } *bundle __free(kfree); 3384 int k, m; 3385 3386 bundle = kzalloc_obj(*bundle); 3387 3388 if (!bundle) { 3389 drm_err(dm->ddev, "Failed to allocate update bundle\n"); 3390 return; 3391 } 3392 3393 for (k = 0; k < dc_state->stream_count; k++) { 3394 bundle->stream_update.stream = dc_state->streams[k]; 3395 3396 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) { 3397 bundle->surface_updates[m].surface = 3398 dc_state->stream_status[k].plane_states[m]; 3399 bundle->surface_updates[m].surface->force_full_update = 3400 true; 3401 } 3402 3403 update_planes_and_stream_adapter(dm->dc, 3404 UPDATE_TYPE_FULL, 3405 dc_state->stream_status[k].plane_count, 3406 dc_state->streams[k], 3407 &bundle->stream_update, 3408 bundle->surface_updates); 3409 } 3410 } 3411 3412 static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev, 3413 struct dc_sink *sink) 3414 { 3415 struct dc_panel_patch *ppatch = NULL; 3416 3417 if (!sink) 3418 return; 3419 3420 ppatch = &sink->edid_caps.panel_patch; 3421 if (ppatch->wait_after_dpcd_poweroff_ms) { 3422 msleep(ppatch->wait_after_dpcd_poweroff_ms); 3423 drm_dbg_driver(adev_to_drm(adev), 3424 "%s: adding a %ds delay as w/a for panel\n", 3425 __func__, 3426 ppatch->wait_after_dpcd_poweroff_ms / 1000); 3427 } 3428 } 3429 3430 /** 3431 * amdgpu_dm_dump_links_and_sinks - Debug dump of all DC links and their sinks 3432 * @adev: amdgpu device pointer 3433 * 3434 * Iterates through all DC links and dumps information about local and remote 3435 * (MST) sinks. Should be called after connector detection is complete to see 3436 * the final state of all links. 3437 */ 3438 static void amdgpu_dm_dump_links_and_sinks(struct amdgpu_device *adev) 3439 { 3440 struct dc *dc = adev->dm.dc; 3441 struct drm_device *dev = adev_to_drm(adev); 3442 int li; 3443 3444 if (!dc) 3445 return; 3446 3447 for (li = 0; li < dc->link_count; li++) { 3448 struct dc_link *l = dc->links[li]; 3449 const char *name = NULL; 3450 int rs; 3451 3452 if (!l) 3453 continue; 3454 if (l->local_sink && l->local_sink->edid_caps.display_name[0]) 3455 name = l->local_sink->edid_caps.display_name; 3456 else 3457 name = "n/a"; 3458 3459 drm_dbg_kms(dev, 3460 "LINK_DUMP[%d]: local_sink=%p type=%d sink_signal=%d sink_count=%u edid_name=%s mst_capable=%d mst_alloc_streams=%d\n", 3461 li, 3462 l->local_sink, 3463 l->type, 3464 l->local_sink ? 
			    l->local_sink->sink_signal : SIGNAL_TYPE_NONE,
			    l->sink_count,
			    name,
			    l->dpcd_caps.is_mst_capable,
			    l->mst_stream_alloc_table.stream_count);

		/* Dump remote (MST) sinks if any */
		for (rs = 0; rs < l->sink_count; rs++) {
			struct dc_sink *rsink = l->remote_sinks[rs];
			const char *rname = NULL;

			if (!rsink)
				continue;
			if (rsink->edid_caps.display_name[0])
				rname = rsink->edid_caps.display_name;
			else
				rname = "n/a";
			drm_dbg_kms(dev,
				    "  REMOTE_SINK[%d:%d]: sink=%p signal=%d edid_name=%s\n",
				    li, rs,
				    rsink,
				    rsink->sink_signal,
				    rname);
		}
	}
}

static int dm_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;
	struct dc_commit_streams_params commit_params = {};

	if (dm->dc->caps.ips_support) {
		if (!amdgpu_in_reset(adev))
			mutex_lock(&dm->dc_lock);

		/* Need to set POWER_STATE_D0 first; otherwise DMUB will not
		 * execute the idle_power_optimizations command.
		 */
		dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
		dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);

		if (!amdgpu_in_reset(adev))
			mutex_unlock(&dm->dc_lock);
	}

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		/*
		 * The dc->current_state is backed up into dm->cached_dc_state
		 * before we commit 0 streams.
		 *
		 * DC will clear link encoder assignments on the real state
		 * but the changes won't propagate over to the copy we made
		 * before the 0 streams commit.
		 *
		 * DC expects that link encoder assignments are *not* valid
		 * when committing a state, so as a workaround we can copy
		 * off of the current state.
		 *
		 * We lose the previous assignments, but we had already
		 * committed 0 streams anyway.
3536 */ 3537 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); 3538 3539 r = dm_dmub_hw_init(adev); 3540 if (r) { 3541 drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r); 3542 return r; 3543 } 3544 3545 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); 3546 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 3547 3548 dc_resume(dm->dc); 3549 3550 amdgpu_dm_irq_resume_early(adev); 3551 3552 for (i = 0; i < dc_state->stream_count; i++) { 3553 dc_state->streams[i]->mode_changed = true; 3554 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { 3555 dc_state->stream_status[i].plane_states[j]->update_flags.raw 3556 = 0xffffffff; 3557 } 3558 } 3559 3560 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 3561 amdgpu_dm_outbox_init(adev); 3562 dc_enable_dmub_outbox(adev->dm.dc); 3563 } 3564 3565 commit_params.streams = dc_state->streams; 3566 commit_params.stream_count = dc_state->stream_count; 3567 dc_exit_ips_for_hw_access(dm->dc); 3568 WARN_ON(!dc_commit_streams(dm->dc, &commit_params)); 3569 3570 dm_gpureset_commit_state(dm->cached_dc_state, dm); 3571 3572 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); 3573 3574 dc_state_release(dm->cached_dc_state); 3575 dm->cached_dc_state = NULL; 3576 3577 amdgpu_dm_irq_resume_late(adev); 3578 3579 mutex_unlock(&dm->dc_lock); 3580 3581 /* set the backlight after a reset */ 3582 for (i = 0; i < dm->num_of_edps; i++) { 3583 if (dm->backlight_dev[i]) 3584 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 3585 } 3586 3587 return 0; 3588 } 3589 /* Recreate dc_state - DC invalidates it when setting power state to S3. */ 3590 dc_state_release(dm_state->context); 3591 dm_state->context = dc_state_create(dm->dc, NULL); 3592 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ 3593 3594 /* Before powering on DC we need to re-initialize DMUB. */ 3595 dm_dmub_hw_resume(adev); 3596 3597 /* Re-enable outbox interrupts for DPIA. 
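* DPIA links tunnel DisplayPort over USB4; their HPD and link events arrive as DMUB outbox notifications rather than on dedicated HPD pins, so the outbox must be re-armed before those events can be delivered.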
*/ 3598 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 3599 amdgpu_dm_outbox_init(adev); 3600 dc_enable_dmub_outbox(adev->dm.dc); 3601 } 3602 3603 /* power on hardware */ 3604 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); 3605 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 3606 3607 /* program HPD filter */ 3608 dc_resume(dm->dc); 3609 3610 /* 3611 * Enable HPD RX IRQ early; this must be done before setting the mode, 3612 * as short-pulse interrupts are used for MST. 3613 */ 3614 amdgpu_dm_irq_resume_early(adev); 3615 3616 s3_handle_hdmi_cec(ddev, false); 3617 3618 /* On resume we need to rewrite the MSTM control bits to enable MST */ 3619 s3_handle_mst(ddev, false); 3620 3621 /* Do detection */ 3622 drm_connector_list_iter_begin(ddev, &iter); 3623 drm_for_each_connector_iter(connector, &iter) { 3624 bool ret; 3625 3626 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 3627 continue; 3628 3629 aconnector = to_amdgpu_dm_connector(connector); 3630 3631 if (!aconnector->dc_link) 3632 continue; 3633 3634 /* 3635 * Skip MST end-sink connectors that were already created; 3636 * the MST framework handles them. 3637 */ 3638 if (aconnector->mst_root) 3639 continue; 3640 3641 /* Skip eDP detection when there is no sink present */ 3642 if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_EDP && 3643 !aconnector->dc_link->edp_sink_present) 3644 continue; 3645 3646 guard(mutex)(&aconnector->hpd_lock); 3647 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 3648 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); 3649 3650 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3651 emulated_link_detect(aconnector->dc_link); 3652 } else { 3653 guard(mutex)(&dm->dc_lock); 3654 dc_exit_ips_for_hw_access(dm->dc); 3655 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4); 3656 if (ret) { 3657 /* w/a delay for certain panels */ 3658 apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink); 3659 } 3660 } 3661 3662 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 3663 aconnector->fake_enable = false; 3664 3665 if (aconnector->dc_sink) 3666 dc_sink_release(aconnector->dc_sink); 3667 aconnector->dc_sink = NULL; 3668 amdgpu_dm_update_connector_after_detect(aconnector); 3669 } 3670 drm_connector_list_iter_end(&iter); 3671 3672 dm_destroy_cached_state(adev); 3673 3674 /* Do MST topology probing after restoring the cached state */ 3675 drm_connector_list_iter_begin(ddev, &iter); 3676 drm_for_each_connector_iter(connector, &iter) { 3677 bool init = false; 3678 3679 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 3680 continue; 3681 3682 aconnector = to_amdgpu_dm_connector(connector); 3683 if (aconnector->dc_link->type != dc_connection_mst_branch || 3684 aconnector->mst_root) 3685 continue; 3686 3687 scoped_guard(mutex, &aconnector->mst_mgr.lock) { 3688 init = !aconnector->mst_mgr.mst_primary; 3689 } 3690 if (init) 3691 dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx, 3692 aconnector->dc_link, false); 3693 else 3694 drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); 3695 } 3696 drm_connector_list_iter_end(&iter); 3697 3698 /* Debug dump: list all DC links and their associated sinks after detection 3699 * is complete for all connectors. This provides a comprehensive view of the 3700 * final state without repeating the dump for each connector.
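* The dump is emitted with drm_dbg_kms(), so enable the DRM_UT_KMS debug category (e.g. drm.debug=0x4) to see the LINK_DUMP lines in the log.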
3701 */ 3702 amdgpu_dm_dump_links_and_sinks(adev); 3703 3704 amdgpu_dm_irq_resume_late(adev); 3705 3706 amdgpu_dm_smu_write_watermarks_table(adev); 3707 3708 drm_kms_helper_hotplug_event(ddev); 3709 3710 return 0; 3711 } 3712 3713 /** 3714 * DOC: DM Lifecycle 3715 * 3716 * DM (and consequently DC) is registered in the amdgpu base driver as an IP 3717 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to 3718 * the base driver's device list to be initialized and torn down accordingly. 3719 * 3720 * The functions to do so are provided as hooks in &struct amd_ip_funcs. 3721 */ 3722 3723 static const struct amd_ip_funcs amdgpu_dm_funcs = { 3724 .name = "dm", 3725 .early_init = dm_early_init, 3726 .late_init = dm_late_init, 3727 .sw_init = dm_sw_init, 3728 .sw_fini = dm_sw_fini, 3729 .early_fini = amdgpu_dm_early_fini, 3730 .hw_init = dm_hw_init, 3731 .hw_fini = dm_hw_fini, 3732 .suspend = dm_suspend, 3733 .resume = dm_resume, 3734 .is_idle = dm_is_idle, 3735 .wait_for_idle = dm_wait_for_idle, 3736 .check_soft_reset = dm_check_soft_reset, 3737 .soft_reset = dm_soft_reset, 3738 .set_clockgating_state = dm_set_clockgating_state, 3739 .set_powergating_state = dm_set_powergating_state, 3740 }; 3741 3742 const struct amdgpu_ip_block_version dm_ip_block = { 3743 .type = AMD_IP_BLOCK_TYPE_DCE, 3744 .major = 1, 3745 .minor = 0, 3746 .rev = 0, 3747 .funcs = &amdgpu_dm_funcs, 3748 }; 3749 3750 3751 /** 3752 * DOC: atomic 3753 * 3754 * *WIP* 3755 */ 3756 3757 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 3758 .fb_create = amdgpu_display_user_framebuffer_create, 3759 .get_format_info = amdgpu_dm_plane_get_format_info, 3760 .atomic_check = amdgpu_dm_atomic_check, 3761 .atomic_commit = drm_atomic_helper_commit, 3762 }; 3763 3764 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { 3765 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, 3766 .atomic_commit_setup = amdgpu_dm_atomic_setup_commit, 3767 }; 3768 3769 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) 3770 { 3771 const struct drm_panel_backlight_quirk *panel_backlight_quirk; 3772 struct amdgpu_dm_backlight_caps *caps; 3773 struct drm_connector *conn_base; 3774 struct amdgpu_device *adev; 3775 struct drm_luminance_range_info *luminance_range; 3776 struct drm_device *drm; 3777 3778 if (aconnector->bl_idx == -1 || 3779 aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP) 3780 return; 3781 3782 conn_base = &aconnector->base; 3783 drm = conn_base->dev; 3784 adev = drm_to_adev(drm); 3785 3786 caps = &adev->dm.backlight_caps[aconnector->bl_idx]; 3787 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; 3788 caps->aux_support = false; 3789 3790 if (caps->ext_caps->bits.oled == 1 3791 /* 3792 * || 3793 * caps->ext_caps->bits.sdr_aux_backlight_control == 1 || 3794 * caps->ext_caps->bits.hdr_aux_backlight_control == 1 3795 */) 3796 caps->aux_support = true; 3797 3798 if (amdgpu_backlight == 0) 3799 caps->aux_support = false; 3800 else if (amdgpu_backlight == 1) 3801 caps->aux_support = true; 3802 if (caps->aux_support) 3803 aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX; 3804 3805 luminance_range = &conn_base->display_info.luminance_range; 3806 3807 if (luminance_range->max_luminance) 3808 caps->aux_max_input_signal = luminance_range->max_luminance; 3809 else 3810 caps->aux_max_input_signal = 512; 3811 3812 if (luminance_range->min_luminance) 3813 caps->aux_min_input_signal = luminance_range->min_luminance; 3814 else 3815
caps->aux_min_input_signal = 1; 3816 3817 panel_backlight_quirk = 3818 drm_get_panel_backlight_quirk(aconnector->drm_edid); 3819 if (!IS_ERR_OR_NULL(panel_backlight_quirk)) { 3820 if (panel_backlight_quirk->min_brightness) { 3821 caps->min_input_signal = 3822 panel_backlight_quirk->min_brightness - 1; 3823 drm_info(drm, 3824 "Applying panel backlight quirk, min_brightness: %d\n", 3825 caps->min_input_signal); 3826 } 3827 if (panel_backlight_quirk->brightness_mask) { 3828 drm_info(drm, 3829 "Applying panel backlight quirk, brightness_mask: 0x%X\n", 3830 panel_backlight_quirk->brightness_mask); 3831 caps->brightness_mask = 3832 panel_backlight_quirk->brightness_mask; 3833 } 3834 } 3835 } 3836 3837 DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T)) 3838 3839 void amdgpu_dm_update_connector_after_detect( 3840 struct amdgpu_dm_connector *aconnector) 3841 { 3842 struct drm_connector *connector = &aconnector->base; 3843 struct dc_sink *sink __free(sink_release) = NULL; 3844 struct drm_device *dev = connector->dev; 3845 3846 /* MST handled by drm_mst framework */ 3847 if (aconnector->mst_mgr.mst_state == true) 3848 return; 3849 3850 sink = aconnector->dc_link->local_sink; 3851 if (sink) 3852 dc_sink_retain(sink); 3853 3854 /* 3855 * The EDID-managed connector gets its first update only in the mode_valid hook; 3856 * the connector sink is then set to either the fake or the physical sink, depending on link status. 3857 * Skip if this was already done during boot. 3858 */ 3859 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED 3860 && aconnector->dc_em_sink) { 3861 3862 /* 3863 * For headless S3 resume, use dc_em_sink to fake the stream 3864 * because connector->sink is set to NULL on resume. 3865 */ 3866 guard(mutex)(&dev->mode_config.mutex); 3867 3868 if (sink) { 3869 if (aconnector->dc_sink) { 3870 amdgpu_dm_update_freesync_caps(connector, NULL); 3871 /* 3872 * The retain and release below are used to 3873 * bump up the refcount of the sink, because the link no longer 3874 * points to it after disconnect; otherwise the next CRTC-to-connector 3875 * reshuffle by the UMD would trigger an unwanted dc_sink release. 3876 */ 3877 dc_sink_release(aconnector->dc_sink); 3878 } 3879 aconnector->dc_sink = sink; 3880 dc_sink_retain(aconnector->dc_sink); 3881 amdgpu_dm_update_freesync_caps(connector, 3882 aconnector->drm_edid); 3883 } else { 3884 amdgpu_dm_update_freesync_caps(connector, NULL); 3885 if (!aconnector->dc_sink) { 3886 aconnector->dc_sink = aconnector->dc_em_sink; 3887 dc_sink_retain(aconnector->dc_sink); 3888 } 3889 } 3890 3891 return; 3892 } 3893 3894 /* 3895 * TODO: temporary guard while looking for a proper fix. 3896 * If this sink is an MST sink, we should not do anything. 3897 */ 3898 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 3899 return; 3900 3901 if (aconnector->dc_sink == sink) { 3902 /* 3903 * We got a DP short pulse (Link Loss, DP CTS, etc...). 3904 * Do nothing!! 3905 */ 3906 drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n", 3907 aconnector->connector_id); 3908 return; 3909 } 3910 3911 drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", 3912 aconnector->connector_id, aconnector->dc_sink, sink); 3913 3914 /* When polling, DRM has already locked the mutex for us. */ 3915 if (!drm_kms_helper_is_poll_worker()) 3916 mutex_lock(&dev->mode_config.mutex); 3917 3918 /* 3919 * 1. Update status of the drm connector 3920 * 2. Send an event and let userspace tell us what to do 3921 */ 3922 if (sink) { 3923 /* 3924 * TODO: check if we still need the S3 mode update workaround.
* If yes, put it here. 3926 */ 3927 if (aconnector->dc_sink) { 3928 amdgpu_dm_update_freesync_caps(connector, NULL); 3929 dc_sink_release(aconnector->dc_sink); 3930 } 3931 3932 aconnector->dc_sink = sink; 3933 dc_sink_retain(aconnector->dc_sink); 3934 if (sink->dc_edid.length == 0) { 3935 aconnector->drm_edid = NULL; 3936 hdmi_cec_unset_edid(aconnector); 3937 if (aconnector->dc_link->aux_mode) { 3938 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 3939 } 3940 } else { 3941 const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid; 3942 3943 aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length); 3944 drm_edid_connector_update(connector, aconnector->drm_edid); 3945 3946 hdmi_cec_set_edid(aconnector); 3947 if (aconnector->dc_link->aux_mode) 3948 drm_dp_cec_attach(&aconnector->dm_dp_aux.aux, 3949 connector->display_info.source_physical_address); 3950 } 3951 3952 if (!aconnector->timing_requested) { 3953 aconnector->timing_requested = 3954 kzalloc_obj(struct dc_crtc_timing); 3955 if (!aconnector->timing_requested) 3956 drm_err(dev, 3957 "failed to allocate aconnector->timing_requested\n"); 3958 } 3959 3960 amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid); 3961 update_connector_ext_caps(aconnector); 3962 } else { 3963 hdmi_cec_unset_edid(aconnector); 3964 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 3965 amdgpu_dm_update_freesync_caps(connector, NULL); 3966 aconnector->num_modes = 0; 3967 dc_sink_release(aconnector->dc_sink); 3968 aconnector->dc_sink = NULL; 3969 drm_edid_free(aconnector->drm_edid); 3970 aconnector->drm_edid = NULL; 3971 kfree(aconnector->timing_requested); 3972 aconnector->timing_requested = NULL; 3973 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ 3974 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 3975 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 3976 } 3977 3978 update_subconnector_property(aconnector); 3979 3980 /* When polling, the mutex will be unlocked for us by DRM. */ 3981 if (!drm_kms_helper_is_poll_worker()) 3982 mutex_unlock(&dev->mode_config.mutex); 3983 } 3984 3985 static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2) 3986 { 3987 if (!sink1 || !sink2) 3988 return false; 3989 if (sink1->sink_signal != sink2->sink_signal) 3990 return false; 3991 3992 if (sink1->dc_edid.length != sink2->dc_edid.length) 3993 return false; 3994 3995 if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid, 3996 sink1->dc_edid.length) != 0) 3997 return false; 3998 return true; 3999 } 4000 4001 4002 /** 4003 * DOC: hdmi_hpd_debounce_work 4004 * 4005 * HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD 4006 * (such as during power save transitions), this delay determines how long to 4007 * wait before processing the HPD event. This allows distinguishing between a 4008 * physical unplug (>hdmi_hpd_debounce_delay) 4009 * and a spontaneous RX HPD toggle (<hdmi_hpd_debounce_delay). 4010 * 4011 * If the toggle is shorter than this delay, the driver compares sink capabilities 4012 * and permits a hotplug event if they changed. 4013 * 4014 * The default value of 1500ms was chosen based on experimental testing with 4015 * various monitors that exhibit spontaneous HPD toggling behavior.
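* For example, with the default 1500ms delay, an HPD drop of a few hundred milliseconds while a monitor enters power save is treated as a spontaneous toggle: the cached and re-detected sinks are compared, and no hotplug event is raised unless their capabilities differ. An HPD drop that persists past the delay is handled as a real unplug.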
4016 */ 4017 static void hdmi_hpd_debounce_work(struct work_struct *work) 4018 { 4019 struct amdgpu_dm_connector *aconnector = 4020 container_of(to_delayed_work(work), struct amdgpu_dm_connector, 4021 hdmi_hpd_debounce_work); 4022 struct drm_connector *connector = &aconnector->base; 4023 struct drm_device *dev = connector->dev; 4024 struct amdgpu_device *adev = drm_to_adev(dev); 4025 struct dc *dc = aconnector->dc_link->ctx->dc; 4026 bool fake_reconnect = false; 4027 bool reallow_idle = false; 4028 bool ret = false; 4029 guard(mutex)(&aconnector->hpd_lock); 4030 4031 /* Re-detect the display */ 4032 scoped_guard(mutex, &adev->dm.dc_lock) { 4033 if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) { 4034 dc_allow_idle_optimizations(dc, false); 4035 reallow_idle = true; 4036 } 4037 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 4038 } 4039 4040 if (ret) { 4041 /* Apply workaround delay for certain panels */ 4042 apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink); 4043 /* Compare sinks to determine if this was a spontaneous HPD toggle */ 4044 if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) { 4045 /* 4046 * Sinks match - this was a spontaneous HDMI HPD toggle. 4047 */ 4048 drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n"); 4049 fake_reconnect = true; 4050 } 4051 4052 /* Update connector state */ 4053 amdgpu_dm_update_connector_after_detect(aconnector); 4054 4055 drm_modeset_lock_all(dev); 4056 dm_restore_drm_connector_state(dev, connector); 4057 drm_modeset_unlock_all(dev); 4058 4059 /* Only notify OS if sink actually changed */ 4060 if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED) 4061 drm_kms_helper_hotplug_event(dev); 4062 } 4063 4064 /* Release the cached sink reference */ 4065 if (aconnector->hdmi_prev_sink) { 4066 dc_sink_release(aconnector->hdmi_prev_sink); 4067 aconnector->hdmi_prev_sink = NULL; 4068 } 4069 4070 scoped_guard(mutex, &adev->dm.dc_lock) { 4071 if (reallow_idle && dc->caps.ips_support) 4072 dc_allow_idle_optimizations(dc, true); 4073 } 4074 } 4075 4076 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) 4077 { 4078 struct drm_connector *connector = &aconnector->base; 4079 struct drm_device *dev = connector->dev; 4080 enum dc_connection_type new_connection_type = dc_connection_none; 4081 struct amdgpu_device *adev = drm_to_adev(dev); 4082 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 4083 struct dc *dc = aconnector->dc_link->ctx->dc; 4084 bool ret = false; 4085 bool debounce_required = false; 4086 4087 if (adev->dm.disable_hpd_irq) 4088 return; 4089 4090 /* 4091 * In case of failure or MST no need to update connector status or notify the OS 4092 * since (for MST case) MST does this in its own context. 4093 */ 4094 guard(mutex)(&aconnector->hpd_lock); 4095 4096 if (adev->dm.hdcp_workqueue) { 4097 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 4098 dm_con_state->update_hdcp = true; 4099 } 4100 if (aconnector->fake_enable) 4101 aconnector->fake_enable = false; 4102 4103 aconnector->timing_changed = false; 4104 4105 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 4106 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); 4107 4108 /* 4109 * Check for HDMI disconnect with debounce enabled. 
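* Debounce applies only when a non-zero delay is configured, the link is HDMI, detection reports no connection, and a sink was previously present.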
*/ 4111 debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 && 4112 dc_is_hdmi_signal(aconnector->dc_link->connector_signal) && 4113 new_connection_type == dc_connection_none && 4114 aconnector->dc_link->local_sink != NULL); 4115 4116 if (aconnector->base.force && new_connection_type == dc_connection_none) { 4117 emulated_link_detect(aconnector->dc_link); 4118 4119 drm_modeset_lock_all(dev); 4120 dm_restore_drm_connector_state(dev, connector); 4121 drm_modeset_unlock_all(dev); 4122 4123 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 4124 drm_kms_helper_connector_hotplug_event(connector); 4125 } else if (debounce_required) { 4126 /* 4127 * HDMI disconnect detected - schedule delayed work instead of 4128 * processing immediately. This allows us to distinguish spurious 4129 * HDMI toggles from physical unplugs. 4130 */ 4131 drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n", 4132 aconnector->hdmi_hpd_debounce_delay_ms); 4133 4134 /* Cache the current sink for later comparison */ 4135 if (aconnector->hdmi_prev_sink) 4136 dc_sink_release(aconnector->hdmi_prev_sink); 4137 aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink; 4138 if (aconnector->hdmi_prev_sink) 4139 dc_sink_retain(aconnector->hdmi_prev_sink); 4140 4141 /* Schedule delayed detection. */ 4142 if (mod_delayed_work(system_wq, 4143 &aconnector->hdmi_hpd_debounce_work, 4144 msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms))) 4145 drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n"); 4146 4147 } else { 4148 4149 /* If hdmi_hpd_debounce_work is already scheduled, exit early */ 4150 if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work)) 4151 return; 4152 4153 scoped_guard(mutex, &adev->dm.dc_lock) { 4154 dc_exit_ips_for_hw_access(dc); 4155 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 4156 } 4157 if (ret) { 4158 /* w/a delay for certain panels */ 4159 apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink); 4160 amdgpu_dm_update_connector_after_detect(aconnector); 4161 4162 drm_modeset_lock_all(dev); 4163 dm_restore_drm_connector_state(dev, connector); 4164 drm_modeset_unlock_all(dev); 4165 4166 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 4167 drm_kms_helper_connector_hotplug_event(connector); 4168 } 4169 } 4170 } 4171 4172 static void handle_hpd_irq(void *param) 4173 { 4174 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 4175 4176 handle_hpd_irq_helper(aconnector); 4177 4178 } 4179 4180 static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq, 4181 union hpd_irq_data hpd_irq_data) 4182 { 4183 struct hpd_rx_irq_offload_work *offload_work = kzalloc_obj(*offload_work); 4184 4185 if (!offload_work) { 4186 drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n"); 4187 return; 4188 } 4189 4190 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); 4191 offload_work->data = hpd_irq_data; 4192 offload_work->offload_wq = offload_wq; 4193 offload_work->adev = adev; 4194 4195 queue_work(offload_wq->wq, &offload_work->work); 4196 drm_dbg_kms(adev_to_drm(adev), "queued hpd_rx offload work"); 4197 } 4198 4199 static void handle_hpd_rx_irq(void *param) 4200 { 4201 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 4202 struct drm_connector *connector = &aconnector->base; 4203 struct drm_device *dev = connector->dev; 4204 struct dc_link *dc_link = aconnector->dc_link; 4205 bool 
is_mst_root_connector = aconnector->mst_mgr.mst_state; 4206 bool result = false; 4207 enum dc_connection_type new_connection_type = dc_connection_none; 4208 struct amdgpu_device *adev = drm_to_adev(dev); 4209 union hpd_irq_data hpd_irq_data; 4210 bool link_loss = false; 4211 bool has_left_work = false; 4212 int idx = dc_link->link_index; 4213 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; 4214 struct dc *dc = aconnector->dc_link->ctx->dc; 4215 4216 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); 4217 4218 if (adev->dm.disable_hpd_irq) 4219 return; 4220 4221 /* 4222 * TODO: Temporarily add a mutex so the HPD interrupt does not have a GPIO 4223 * conflict; once an i2c helper is implemented, this mutex should be 4224 * retired. 4225 */ 4226 mutex_lock(&aconnector->hpd_lock); 4227 4228 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, 4229 &link_loss, true, &has_left_work); 4230 4231 if (!has_left_work) 4232 goto out; 4233 4234 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { 4235 schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); 4236 goto out; 4237 } 4238 4239 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { 4240 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || 4241 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { 4242 bool skip = false; 4243 4244 /* 4245 * DOWN_REP_MSG_RDY is also handled by the polling method 4246 * mgr->cbs->poll_hpd_irq() 4247 */ 4248 spin_lock(&offload_wq->offload_lock); 4249 skip = offload_wq->is_handling_mst_msg_rdy_event; 4250 4251 if (!skip) 4252 offload_wq->is_handling_mst_msg_rdy_event = true; 4253 4254 spin_unlock(&offload_wq->offload_lock); 4255 4256 if (!skip) 4257 schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); 4258 4259 goto out; 4260 } 4261 4262 if (link_loss) { 4263 bool skip = false; 4264 4265 spin_lock(&offload_wq->offload_lock); 4266 skip = offload_wq->is_handling_link_loss; 4267 4268 if (!skip) 4269 offload_wq->is_handling_link_loss = true; 4270 4271 spin_unlock(&offload_wq->offload_lock); 4272 4273 if (!skip) 4274 schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); 4275 4276 goto out; 4277 } 4278 } 4279 4280 out: 4281 if (result && !is_mst_root_connector) { 4282 /* Downstream Port status changed. 
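* A short pulse can report that a sink behind a DP branch device was plugged or unplugged, so re-run detection on this link to pick up the new state.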
*/ 4283 if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) 4284 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); 4285 4286 if (aconnector->base.force && new_connection_type == dc_connection_none) { 4287 emulated_link_detect(dc_link); 4288 4289 if (aconnector->fake_enable) 4290 aconnector->fake_enable = false; 4291 4292 amdgpu_dm_update_connector_after_detect(aconnector); 4293 4294 4295 drm_modeset_lock_all(dev); 4296 dm_restore_drm_connector_state(dev, connector); 4297 drm_modeset_unlock_all(dev); 4298 4299 drm_kms_helper_connector_hotplug_event(connector); 4300 } else { 4301 bool ret = false; 4302 4303 mutex_lock(&adev->dm.dc_lock); 4304 dc_exit_ips_for_hw_access(dc); 4305 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX); 4306 mutex_unlock(&adev->dm.dc_lock); 4307 4308 if (ret) { 4309 if (aconnector->fake_enable) 4310 aconnector->fake_enable = false; 4311 4312 amdgpu_dm_update_connector_after_detect(aconnector); 4313 4314 drm_modeset_lock_all(dev); 4315 dm_restore_drm_connector_state(dev, connector); 4316 drm_modeset_unlock_all(dev); 4317 4318 drm_kms_helper_connector_hotplug_event(connector); 4319 } 4320 } 4321 } 4322 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { 4323 if (adev->dm.hdcp_workqueue) 4324 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); 4325 } 4326 4327 if (dc_link->type != dc_connection_mst_branch) 4328 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux); 4329 4330 mutex_unlock(&aconnector->hpd_lock); 4331 } 4332 4333 static int register_hpd_handlers(struct amdgpu_device *adev) 4334 { 4335 struct drm_device *dev = adev_to_drm(adev); 4336 struct drm_connector *connector; 4337 struct amdgpu_dm_connector *aconnector; 4338 const struct dc_link *dc_link; 4339 struct dc_interrupt_params int_params = {0}; 4340 4341 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4342 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4343 4344 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 4345 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, 4346 dmub_hpd_callback, true)) { 4347 drm_err(adev_to_drm(adev), "failed to register dmub hpd callback"); 4348 return -EINVAL; 4349 } 4350 4351 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, 4352 dmub_hpd_callback, true)) { 4353 drm_err(adev_to_drm(adev), "failed to register dmub hpd irq callback"); 4354 return -EINVAL; 4355 } 4356 4357 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY, 4358 dmub_hpd_sense_callback, true)) { 4359 drm_err(adev_to_drm(adev), "failed to register dmub hpd sense callback"); 4360 return -EINVAL; 4361 } 4362 } 4363 4364 list_for_each_entry(connector, 4365 &dev->mode_config.connector_list, head) { 4366 4367 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 4368 continue; 4369 4370 aconnector = to_amdgpu_dm_connector(connector); 4371 dc_link = aconnector->dc_link; 4372 4373 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { 4374 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 4375 int_params.irq_source = dc_link->irq_source_hpd; 4376 4377 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4378 int_params.irq_source < DC_IRQ_SOURCE_HPD1 || 4379 int_params.irq_source > DC_IRQ_SOURCE_HPD6) { 4380 drm_err(adev_to_drm(adev), "Failed to register hpd irq!\n"); 4381 return -EINVAL; 4382 } 4383 4384 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4385 handle_hpd_irq, (void *) aconnector)) 4386 return -ENOMEM; 4387 } 4388 4389 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { 
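/* In addition to the long-pulse HPD handler above, DP links raise short pulses (hpd_rx) for link-status changes and MST sideband traffic. */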
4390 4391 /* Also register for DP short pulse (hpd_rx). */ 4392 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 4393 int_params.irq_source = dc_link->irq_source_hpd_rx; 4394 4395 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4396 int_params.irq_source < DC_IRQ_SOURCE_HPD1RX || 4397 int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) { 4398 drm_err(adev_to_drm(adev), "Failed to register hpd rx irq!\n"); 4399 return -EINVAL; 4400 } 4401 4402 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4403 handle_hpd_rx_irq, (void *) aconnector)) 4404 return -ENOMEM; 4405 } 4406 } 4407 return 0; 4408 } 4409 4410 /* Register IRQ sources and initialize IRQ callbacks */ 4411 static int dce110_register_irq_handlers(struct amdgpu_device *adev) 4412 { 4413 struct dc *dc = adev->dm.dc; 4414 struct common_irq_params *c_irq_params; 4415 struct dc_interrupt_params int_params = {0}; 4416 int r; 4417 int i; 4418 unsigned int src_id; 4419 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 4420 /* Use different interrupts for VBLANK on DCE 6 vs. newer. */ 4421 const unsigned int vblank_d1 = 4422 adev->dm.dc->ctx->dce_version >= DCE_VERSION_8_0 4423 ? VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0 : 1; 4424 4425 if (adev->family >= AMDGPU_FAMILY_AI) 4426 client_id = SOC15_IH_CLIENTID_DCE; 4427 4428 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4429 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4430 4431 /* 4432 * Actions of amdgpu_irq_add_id(): 4433 * 1. Register a set() function with base driver. 4434 * Base driver will call set() function to enable/disable an 4435 * interrupt in DC hardware. 4436 * 2. Register amdgpu_dm_irq_handler(). 4437 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 4438 * coming from DC hardware. 4439 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 4440 * for acknowledging and handling. 
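* In short, each registration block below follows the same pattern: amdgpu_irq_add_id() hooks the source id into the base driver, dc_interrupt_to_irq_source() maps it to a DC irq source, and amdgpu_dm_irq_register_interrupt() attaches the DM handler for that source.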
4441 */ 4442 4443 /* Use VBLANK interrupt */ 4444 for (i = 0; i < adev->mode_info.num_crtc; i++) { 4445 src_id = vblank_d1 + i; 4446 r = amdgpu_irq_add_id(adev, client_id, src_id, &adev->crtc_irq); 4447 if (r) { 4448 drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); 4449 return r; 4450 } 4451 4452 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4453 int_params.irq_source = 4454 dc_interrupt_to_irq_source(dc, src_id, 0); 4455 4456 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4457 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || 4458 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { 4459 drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); 4460 return -EINVAL; 4461 } 4462 4463 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 4464 4465 c_irq_params->adev = adev; 4466 c_irq_params->irq_src = int_params.irq_source; 4467 4468 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4469 dm_crtc_high_irq, c_irq_params)) 4470 return -ENOMEM; 4471 } 4472 4473 if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) { 4474 /* Use VUPDATE interrupt */ 4475 for (i = 0; i < adev->mode_info.num_crtc; i++) { 4476 src_id = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT + i * 2; 4477 r = amdgpu_irq_add_id(adev, client_id, src_id, &adev->vupdate_irq); 4478 if (r) { 4479 drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n"); 4480 return r; 4481 } 4482 4483 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4484 int_params.irq_source = 4485 dc_interrupt_to_irq_source(dc, src_id, 0); 4486 4487 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4488 int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || 4489 int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { 4490 drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n"); 4491 return -EINVAL; 4492 } 4493 4494 c_irq_params = &adev->dm.vupdate_params[ 4495 int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 4496 c_irq_params->adev = adev; 4497 c_irq_params->irq_src = int_params.irq_source; 4498 4499 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4500 dm_vupdate_high_irq, c_irq_params)) 4501 return -ENOMEM; 4502 } 4503 } 4504 4505 /* Use GRPH_PFLIP interrupt */ 4506 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 4507 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 4508 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 4509 if (r) { 4510 drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); 4511 return r; 4512 } 4513 4514 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4515 int_params.irq_source = 4516 dc_interrupt_to_irq_source(dc, i, 0); 4517 4518 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4519 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || 4520 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { 4521 drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); 4522 return -EINVAL; 4523 } 4524 4525 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 4526 4527 c_irq_params->adev = adev; 4528 c_irq_params->irq_src = int_params.irq_source; 4529 4530 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4531 dm_pflip_high_irq, c_irq_params)) 4532 return -ENOMEM; 4533 } 4534 4535 /* HPD */ 4536 r = amdgpu_irq_add_id(adev, client_id, 4537 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 4538 if (r) { 4539 drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); 4540 return r; 4541 } 4542 4543 r = register_hpd_handlers(adev); 4544 4545 return r; 4546 } 4547 4548 /* Register IRQ sources and initialize 
IRQ callbacks */ 4549 static int dcn10_register_irq_handlers(struct amdgpu_device *adev) 4550 { 4551 struct dc *dc = adev->dm.dc; 4552 struct common_irq_params *c_irq_params; 4553 struct dc_interrupt_params int_params = {0}; 4554 int r; 4555 int i; 4556 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 4557 static const unsigned int vrtl_int_srcid[] = { 4558 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, 4559 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, 4560 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, 4561 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, 4562 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, 4563 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL 4564 }; 4565 #endif 4566 4567 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4568 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4569 4570 /* 4571 * Actions of amdgpu_irq_add_id(): 4572 * 1. Register a set() function with base driver. 4573 * Base driver will call set() function to enable/disable an 4574 * interrupt in DC hardware. 4575 * 2. Register amdgpu_dm_irq_handler(). 4576 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 4577 * coming from DC hardware. 4578 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 4579 * for acknowledging and handling. 4580 */ 4581 4582 /* Use VSTARTUP interrupt */ 4583 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 4584 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 4585 i++) { 4586 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); 4587 4588 if (r) { 4589 drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); 4590 return r; 4591 } 4592 4593 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4594 int_params.irq_source = 4595 dc_interrupt_to_irq_source(dc, i, 0); 4596 4597 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4598 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || 4599 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { 4600 drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); 4601 return -EINVAL; 4602 } 4603 4604 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 4605 4606 c_irq_params->adev = adev; 4607 c_irq_params->irq_src = int_params.irq_source; 4608 4609 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4610 dm_crtc_high_irq, c_irq_params)) 4611 return -ENOMEM; 4612 } 4613 4614 /* Use otg vertical line interrupt */ 4615 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 4616 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { 4617 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, 4618 vrtl_int_srcid[i], &adev->vline0_irq); 4619 4620 if (r) { 4621 drm_err(adev_to_drm(adev), "Failed to add vline0 irq id!\n"); 4622 return r; 4623 } 4624 4625 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4626 int_params.irq_source = 4627 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); 4628 4629 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4630 int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 || 4631 int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) { 4632 drm_err(adev_to_drm(adev), "Failed to register vline0 irq!\n"); 4633 return -EINVAL; 4634 } 4635 4636 c_irq_params = &adev->dm.vline0_params[int_params.irq_source 4637 - DC_IRQ_SOURCE_DC1_VLINE0]; 4638 4639 c_irq_params->adev = adev; 4640 c_irq_params->irq_src = int_params.irq_source; 4641 4642 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4643 dm_dcn_vertical_interrupt0_high_irq, 4644 c_irq_params)) 4645 return -ENOMEM; 4646 } 4647 #endif 4648 4649 /* Use 
VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to 4650 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx 4651 * to trigger at end of each vblank, regardless of state of the lock, 4652 * matching DCE behaviour. 4653 */ 4654 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; 4655 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; 4656 i++) { 4657 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); 4658 4659 if (r) { 4660 drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n"); 4661 return r; 4662 } 4663 4664 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4665 int_params.irq_source = 4666 dc_interrupt_to_irq_source(dc, i, 0); 4667 4668 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4669 int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || 4670 int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { 4671 drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n"); 4672 return -EINVAL; 4673 } 4674 4675 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 4676 4677 c_irq_params->adev = adev; 4678 c_irq_params->irq_src = int_params.irq_source; 4679 4680 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4681 dm_vupdate_high_irq, c_irq_params)) 4682 return -ENOMEM; 4683 } 4684 4685 /* Use GRPH_PFLIP interrupt */ 4686 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 4687 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; 4688 i++) { 4689 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 4690 if (r) { 4691 drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); 4692 return r; 4693 } 4694 4695 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4696 int_params.irq_source = 4697 dc_interrupt_to_irq_source(dc, i, 0); 4698 4699 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4700 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || 4701 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { 4702 drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); 4703 return -EINVAL; 4704 } 4705 4706 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 4707 4708 c_irq_params->adev = adev; 4709 c_irq_params->irq_src = int_params.irq_source; 4710 4711 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4712 dm_pflip_high_irq, c_irq_params)) 4713 return -ENOMEM; 4714 } 4715 4716 /* HPD */ 4717 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 4718 &adev->hpd_irq); 4719 if (r) { 4720 drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); 4721 return r; 4722 } 4723 4724 r = register_hpd_handlers(adev); 4725 4726 return r; 4727 } 4728 /* Register Outbox IRQ sources and initialize IRQ callbacks */ 4729 static int register_outbox_irq_handlers(struct amdgpu_device *adev) 4730 { 4731 struct dc *dc = adev->dm.dc; 4732 struct common_irq_params *c_irq_params; 4733 struct dc_interrupt_params int_params = {0}; 4734 int r, i; 4735 4736 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4737 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4738 4739 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, 4740 &adev->dmub_outbox_irq); 4741 if (r) { 4742 drm_err(adev_to_drm(adev), "Failed to add outbox irq id!\n"); 4743 return r; 4744 } 4745 4746 if (dc->ctx->dmub_srv) { 4747 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT; 4748 int_params.int_context = 
INTERRUPT_LOW_IRQ_CONTEXT; 4749 int_params.irq_source = 4750 dc_interrupt_to_irq_source(dc, i, 0); 4751 4752 c_irq_params = &adev->dm.dmub_outbox_params[0]; 4753 4754 c_irq_params->adev = adev; 4755 c_irq_params->irq_src = int_params.irq_source; 4756 4757 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4758 dm_dmub_outbox1_low_irq, c_irq_params)) 4759 return -ENOMEM; 4760 } 4761 4762 return 0; 4763 } 4764 4765 /* 4766 * Acquires the lock for the atomic state object and returns 4767 * the new atomic state. 4768 * 4769 * This should only be called during atomic check. 4770 */ 4771 int dm_atomic_get_state(struct drm_atomic_state *state, 4772 struct dm_atomic_state **dm_state) 4773 { 4774 struct drm_device *dev = state->dev; 4775 struct amdgpu_device *adev = drm_to_adev(dev); 4776 struct amdgpu_display_manager *dm = &adev->dm; 4777 struct drm_private_state *priv_state; 4778 4779 if (*dm_state) 4780 return 0; 4781 4782 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); 4783 if (IS_ERR(priv_state)) 4784 return PTR_ERR(priv_state); 4785 4786 *dm_state = to_dm_atomic_state(priv_state); 4787 4788 return 0; 4789 } 4790 4791 static struct dm_atomic_state * 4792 dm_atomic_get_new_state(struct drm_atomic_state *state) 4793 { 4794 struct drm_device *dev = state->dev; 4795 struct amdgpu_device *adev = drm_to_adev(dev); 4796 struct amdgpu_display_manager *dm = &adev->dm; 4797 struct drm_private_obj *obj; 4798 struct drm_private_state *new_obj_state; 4799 int i; 4800 4801 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { 4802 if (obj->funcs == dm->atomic_obj.funcs) 4803 return to_dm_atomic_state(new_obj_state); 4804 } 4805 4806 return NULL; 4807 } 4808 4809 static struct drm_private_state * 4810 dm_atomic_duplicate_state(struct drm_private_obj *obj) 4811 { 4812 struct dm_atomic_state *old_state, *new_state; 4813 4814 new_state = kzalloc_obj(*new_state); 4815 if (!new_state) 4816 return NULL; 4817 4818 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); 4819 4820 old_state = to_dm_atomic_state(obj->state); 4821 4822 if (old_state && old_state->context) 4823 new_state->context = dc_state_create_copy(old_state->context); 4824 4825 if (!new_state->context) { 4826 kfree(new_state); 4827 return NULL; 4828 } 4829 4830 return &new_state->base; 4831 } 4832 4833 static void dm_atomic_destroy_state(struct drm_private_obj *obj, 4834 struct drm_private_state *state) 4835 { 4836 struct dm_atomic_state *dm_state = to_dm_atomic_state(state); 4837 4838 if (dm_state && dm_state->context) 4839 dc_state_release(dm_state->context); 4840 4841 kfree(dm_state); 4842 } 4843 4844 static struct drm_private_state_funcs dm_atomic_state_funcs = { 4845 .atomic_duplicate_state = dm_atomic_duplicate_state, 4846 .atomic_destroy_state = dm_atomic_destroy_state, 4847 }; 4848 4849 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) 4850 { 4851 struct dm_atomic_state *state; 4852 int r; 4853 4854 adev->mode_info.mode_config_initialized = true; 4855 4856 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; 4857 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; 4858 4859 adev_to_drm(adev)->mode_config.max_width = 16384; 4860 adev_to_drm(adev)->mode_config.max_height = 16384; 4861 4862 adev_to_drm(adev)->mode_config.preferred_depth = 24; 4863 if (adev->asic_type == CHIP_HAWAII) 4864 /* disable prefer shadow for now due to hibernation issues */ 4865 adev_to_drm(adev)->mode_config.prefer_shadow = 0; 4866 else 4867 
adev_to_drm(adev)->mode_config.prefer_shadow = 1; 4868 /* indicates support for immediate flip */ 4869 adev_to_drm(adev)->mode_config.async_page_flip = true; 4870 4871 state = kzalloc_obj(*state); 4872 if (!state) 4873 return -ENOMEM; 4874 4875 state->context = dc_state_create_current_copy(adev->dm.dc); 4876 if (!state->context) { 4877 kfree(state); 4878 return -ENOMEM; 4879 } 4880 4881 drm_atomic_private_obj_init(adev_to_drm(adev), 4882 &adev->dm.atomic_obj, 4883 &state->base, 4884 &dm_atomic_state_funcs); 4885 4886 r = amdgpu_display_modeset_create_props(adev); 4887 if (r) { 4888 dc_state_release(state->context); 4889 kfree(state); 4890 return r; 4891 } 4892 4893 #ifdef AMD_PRIVATE_COLOR 4894 if (amdgpu_dm_create_color_properties(adev)) { 4895 dc_state_release(state->context); 4896 kfree(state); 4897 return -ENOMEM; 4898 } 4899 #endif 4900 4901 r = amdgpu_dm_audio_init(adev); 4902 if (r) { 4903 dc_state_release(state->context); 4904 kfree(state); 4905 return r; 4906 } 4907 4908 return 0; 4909 } 4910 4911 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 4912 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 4913 #define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2) 4914 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 4915 4916 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, 4917 int bl_idx) 4918 { 4919 struct amdgpu_dm_backlight_caps *caps = &dm->backlight_caps[bl_idx]; 4920 4921 if (caps->caps_valid) 4922 return; 4923 4924 #if defined(CONFIG_ACPI) 4925 amdgpu_acpi_get_backlight_caps(caps); 4926 4927 /* validate the firmware value is sane */ 4928 if (caps->caps_valid) { 4929 int spread = caps->max_input_signal - caps->min_input_signal; 4930 4931 if (caps->max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || 4932 caps->min_input_signal < 0 || 4933 spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || 4934 spread < AMDGPU_DM_MIN_SPREAD) { 4935 drm_dbg_kms(adev_to_drm(dm->adev), "DM: Invalid backlight caps: min=%d, max=%d\n", 4936 caps->min_input_signal, caps->max_input_signal); 4937 caps->caps_valid = false; 4938 } 4939 } 4940 4941 if (!caps->caps_valid) { 4942 caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4943 caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4944 caps->caps_valid = true; 4945 } 4946 #else 4947 if (caps->aux_support) 4948 return; 4949 4950 caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4951 caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4952 caps->caps_valid = true; 4953 #endif 4954 } 4955 4956 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, 4957 unsigned int *min, unsigned int *max) 4958 { 4959 if (!caps) 4960 return 0; 4961 4962 if (caps->aux_support) { 4963 // Firmware limits are in nits, DC API wants millinits. 4964 *max = 1000 * caps->aux_max_input_signal; 4965 *min = 1000 * caps->aux_min_input_signal; 4966 } else { 4967 // Firmware limits are 8-bit, PWM control is 16-bit. 
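// Multiplying by 0x101 replicates the 8-bit value into both bytes of the 16-bit result, mapping the range exactly: 255 * 0x101 = 0xFFFF.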
4968 *max = 0x101 * caps->max_input_signal; 4969 *min = 0x101 * caps->min_input_signal; 4970 } 4971 return 1; 4972 } 4973 4974 /* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */ 4975 static inline u32 scale_input_to_fw(int min, int max, u64 input) 4976 { 4977 return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min); 4978 } 4979 4980 /* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */ 4981 static inline u32 scale_fw_to_input(int min, int max, u64 input) 4982 { 4983 return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL); 4984 } 4985 4986 static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps, 4987 unsigned int min, unsigned int max, 4988 uint32_t *user_brightness) 4989 { 4990 u32 brightness = scale_input_to_fw(min, max, *user_brightness); 4991 u8 lower_signal, upper_signal, upper_lum, lower_lum, lum; 4992 int left, right; 4993 4994 if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE) 4995 return; 4996 4997 if (!caps->data_points) 4998 return; 4999 5000 /* 5001 * Handle the case where brightness is below the first data point 5002 * Interpolate between (0,0) and (first_signal, first_lum) 5003 */ 5004 if (brightness < caps->luminance_data[0].input_signal) { 5005 lum = DIV_ROUND_CLOSEST(caps->luminance_data[0].luminance * brightness, 5006 caps->luminance_data[0].input_signal); 5007 goto scale; 5008 } 5009 5010 left = 0; 5011 right = caps->data_points - 1; 5012 while (left <= right) { 5013 int mid = left + (right - left) / 2; 5014 u8 signal = caps->luminance_data[mid].input_signal; 5015 5016 /* Exact match found */ 5017 if (signal == brightness) { 5018 lum = caps->luminance_data[mid].luminance; 5019 goto scale; 5020 } 5021 5022 if (signal < brightness) 5023 left = mid + 1; 5024 else 5025 right = mid - 1; 5026 } 5027 5028 /* verify bound */ 5029 if (left >= caps->data_points) 5030 left = caps->data_points - 1; 5031 5032 /* At this point, left > right */ 5033 lower_signal = caps->luminance_data[right].input_signal; 5034 upper_signal = caps->luminance_data[left].input_signal; 5035 lower_lum = caps->luminance_data[right].luminance; 5036 upper_lum = caps->luminance_data[left].luminance; 5037 5038 /* interpolate */ 5039 if (right == left || !lower_lum) 5040 lum = upper_lum; 5041 else 5042 lum = lower_lum + DIV_ROUND_CLOSEST((upper_lum - lower_lum) * 5043 (brightness - lower_signal), 5044 upper_signal - lower_signal); 5045 scale: 5046 *user_brightness = scale_fw_to_input(min, max, 5047 DIV_ROUND_CLOSEST(lum * brightness, 101)); 5048 } 5049 5050 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, 5051 uint32_t brightness) 5052 { 5053 unsigned int min, max; 5054 5055 if (!get_brightness_range(caps, &min, &max)) 5056 return brightness; 5057 5058 convert_custom_brightness(caps, min, max, &brightness); 5059 5060 // Rescale 0..max to min..max 5061 return min + DIV_ROUND_CLOSEST_ULL((u64)(max - min) * brightness, max); 5062 } 5063 5064 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, 5065 uint32_t brightness) 5066 { 5067 unsigned int min, max; 5068 5069 if (!get_brightness_range(caps, &min, &max)) 5070 return brightness; 5071 5072 if (brightness < min) 5073 return 0; 5074 // Rescale min..max to 0..max 5075 return DIV_ROUND_CLOSEST_ULL((u64)max * (brightness - min), 5076 max - min); 5077 } 5078 5079 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, 5080 int bl_idx, 5081 u32 user_brightness) 5082 { 5083 struct amdgpu_dm_backlight_caps 
*caps; 5084 struct dc_link *link; 5085 u32 brightness; 5086 bool rc, reallow_idle = false; 5087 struct drm_connector *connector; 5088 5089 list_for_each_entry(connector, &dm->ddev->mode_config.connector_list, head) { 5090 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 5091 5092 if (aconnector->bl_idx != bl_idx) 5093 continue; 5094 5095 /* if connector is off, save the brightness for next time it's on */ 5096 if (!aconnector->base.encoder) { 5097 dm->brightness[bl_idx] = user_brightness; 5098 dm->actual_brightness[bl_idx] = 0; 5099 return; 5100 } 5101 } 5102 5103 amdgpu_dm_update_backlight_caps(dm, bl_idx); 5104 caps = &dm->backlight_caps[bl_idx]; 5105 5106 dm->brightness[bl_idx] = user_brightness; 5107 /* update scratch register */ 5108 if (bl_idx == 0) 5109 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); 5110 brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]); 5111 link = (struct dc_link *)dm->backlight_link[bl_idx]; 5112 5113 /* Apply brightness quirk */ 5114 if (caps->brightness_mask) 5115 brightness |= caps->brightness_mask; 5116 5117 /* Change brightness based on AUX property */ 5118 mutex_lock(&dm->dc_lock); 5119 if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) { 5120 dc_allow_idle_optimizations(dm->dc, false); 5121 reallow_idle = true; 5122 } 5123 5124 if (trace_amdgpu_dm_brightness_enabled()) { 5125 trace_amdgpu_dm_brightness(__builtin_return_address(0), 5126 user_brightness, 5127 brightness, 5128 caps->aux_support, 5129 power_supply_is_system_supplied() > 0); 5130 } 5131 5132 if (caps->aux_support) { 5133 rc = dc_link_set_backlight_level_nits(link, true, brightness, 5134 AUX_BL_DEFAULT_TRANSITION_TIME_MS); 5135 if (!rc) 5136 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); 5137 } else { 5138 struct set_backlight_level_params backlight_level_params = { 0 }; 5139 5140 backlight_level_params.backlight_pwm_u16_16 = brightness; 5141 backlight_level_params.transition_time_in_ms = 0; 5142 5143 rc = dc_link_set_backlight_level(link, &backlight_level_params); 5144 if (!rc) 5145 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); 5146 } 5147 5148 if (dm->dc->caps.ips_support && reallow_idle) 5149 dc_allow_idle_optimizations(dm->dc, true); 5150 5151 mutex_unlock(&dm->dc_lock); 5152 5153 if (rc) 5154 dm->actual_brightness[bl_idx] = user_brightness; 5155 } 5156 5157 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) 5158 { 5159 struct amdgpu_display_manager *dm = bl_get_data(bd); 5160 int i; 5161 5162 for (i = 0; i < dm->num_of_edps; i++) { 5163 if (bd == dm->backlight_dev[i]) 5164 break; 5165 } 5166 if (i >= AMDGPU_DM_MAX_NUM_EDP) 5167 i = 0; 5168 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness); 5169 5170 return 0; 5171 } 5172 5173 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, 5174 int bl_idx) 5175 { 5176 int ret; 5177 struct amdgpu_dm_backlight_caps caps; 5178 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; 5179 5180 amdgpu_dm_update_backlight_caps(dm, bl_idx); 5181 caps = dm->backlight_caps[bl_idx]; 5182 5183 if (caps.aux_support) { 5184 u32 avg, peak; 5185 5186 if (!dc_link_get_backlight_level_nits(link, &avg, &peak)) 5187 return dm->brightness[bl_idx]; 5188 return convert_brightness_to_user(&caps, avg); 5189 } 5190 5191 ret = dc_link_get_backlight_level(link); 5192 5193 if (ret == DC_ERROR_UNEXPECTED) 5194 return dm->brightness[bl_idx]; 5195 5196 return 
convert_brightness_to_user(&caps, ret); 5197 } 5198 5199 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) 5200 { 5201 struct amdgpu_display_manager *dm = bl_get_data(bd); 5202 int i; 5203 5204 for (i = 0; i < dm->num_of_edps; i++) { 5205 if (bd == dm->backlight_dev[i]) 5206 break; 5207 } 5208 if (i >= AMDGPU_DM_MAX_NUM_EDP) 5209 i = 0; 5210 return amdgpu_dm_backlight_get_level(dm, i); 5211 } 5212 5213 static const struct backlight_ops amdgpu_dm_backlight_ops = { 5214 .options = BL_CORE_SUSPENDRESUME, 5215 .get_brightness = amdgpu_dm_backlight_get_brightness, 5216 .update_status = amdgpu_dm_backlight_update_status, 5217 }; 5218 5219 static void 5220 amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) 5221 { 5222 struct drm_device *drm = aconnector->base.dev; 5223 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; 5224 struct backlight_properties props = { 0 }; 5225 struct amdgpu_dm_backlight_caps *caps; 5226 char bl_name[16]; 5227 int min, max; 5228 int real_brightness; 5229 int init_brightness; 5230 5231 if (aconnector->bl_idx == -1) 5232 return; 5233 5234 if (!acpi_video_backlight_use_native()) { 5235 drm_info(drm, "Skipping amdgpu DM backlight registration\n"); 5236 /* Try registering an ACPI video backlight device instead. */ 5237 acpi_video_register_backlight(); 5238 return; 5239 } 5240 5241 caps = &dm->backlight_caps[aconnector->bl_idx]; 5242 if (get_brightness_range(caps, &min, &max)) { 5243 if (power_supply_is_system_supplied() > 0) 5244 props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100); 5245 else 5246 props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100); 5247 /* min is zero, so max needs to be adjusted */ 5248 props.max_brightness = max - min; 5249 drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max, 5250 caps->ac_level, caps->dc_level); 5251 } else 5252 props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL; 5253 5254 init_brightness = props.brightness; 5255 5256 if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) { 5257 drm_info(drm, "Using custom brightness curve\n"); 5258 props.scale = BACKLIGHT_SCALE_NON_LINEAR; 5259 } else 5260 props.scale = BACKLIGHT_SCALE_LINEAR; 5261 props.type = BACKLIGHT_RAW; 5262 5263 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", 5264 drm->primary->index + aconnector->bl_idx); 5265 5266 dm->backlight_dev[aconnector->bl_idx] = 5267 backlight_device_register(bl_name, aconnector->base.kdev, dm, 5268 &amdgpu_dm_backlight_ops, &props); 5269 dm->brightness[aconnector->bl_idx] = props.brightness; 5270 5271 if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { 5272 drm_err(drm, "DM: Backlight registration failed!\n"); 5273 dm->backlight_dev[aconnector->bl_idx] = NULL; 5274 } else { 5275 /* 5276 * dm->brightness[x] can be inconsistent just after startup until 5277 * ops.get_brightness is called. 
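* Read the real level back once here so the cached values are resynced with what the hardware reports.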
 */
		real_brightness =
			amdgpu_dm_backlight_ops.get_brightness(dm->backlight_dev[aconnector->bl_idx]);

		if (real_brightness != init_brightness) {
			dm->actual_brightness[aconnector->bl_idx] = real_brightness;
			dm->brightness[aconnector->bl_idx] = real_brightness;
		}
		drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
	}
}

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc_obj(struct drm_plane);
	if (!plane) {
		drm_err(adev_to_drm(dm->adev), "KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		drm_err(adev_to_drm(dm->adev), "KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

static void setup_backlight_device(struct amdgpu_display_manager *dm,
				   struct amdgpu_dm_connector *aconnector)
{
	struct amdgpu_dm_backlight_caps *caps;
	struct dc_link *link = aconnector->dc_link;
	int bl_idx = dm->num_of_edps;

	if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
	    link->type == dc_connection_none)
		return;

	if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
		drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
		return;
	}

	aconnector->bl_idx = bl_idx;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	dm->backlight_link[bl_idx] = link;
	dm->num_of_edps++;

	update_connector_ext_caps(aconnector);
	caps = &dm->backlight_caps[aconnector->bl_idx];

	/*
	 * Only offer the ABM property for non-OLED panels, and only if the
	 * user hasn't disabled it via module parameter.
	 */
	if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0)
		drm_object_attach_property(&aconnector->base.base,
					   dm->adev->mode_info.abm_level_property,
					   ABM_SYSFS_CONTROL);
}

static void amdgpu_set_panel_orientation(struct drm_connector *connector);

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required.
 * The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	s32 i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	u32 link_cnt;
	s32 primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;
	bool psr_feature_enabled = false;
	bool replay_feature_enabled = false;
	int max_overlay = dm->dc->caps.max_slave_planes;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual number of CRTCs in use */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	amdgpu_dm_set_irq_funcs(adev);

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		drm_err(adev_to_drm(adev), "DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	if (primary_planes > AMDGPU_MAX_PLANES) {
		drm_err(adev_to_drm(adev), "DM: Primary plane count %d exceeds AMDGPU_MAX_PLANES\n",
			primary_planes);
		return -EINVAL;
	}

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLs.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			drm_err(adev_to_drm(adev), "KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
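 *
 * Worked example (assuming a 4-pipe DCN, i.e. max_streams == 4): the
 * first plane below that passes the DCN/ARGB8888 checks at caps index i
 * is registered with DRM plane id (4 + i). Since that id is >=
 * max_streams, initialize_plane() gives it possible_crtcs == 0xff, so
 * the single exposed overlay may be placed on any CRTC.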
5429 */ 5430 for (i = 0; i < dm->dc->caps.max_planes; ++i) { 5431 struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; 5432 5433 /* Do not create overlay if MPO disabled */ 5434 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO) 5435 break; 5436 5437 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) 5438 continue; 5439 5440 if (!plane->pixel_format_support.argb8888) 5441 continue; 5442 5443 if (max_overlay-- == 0) 5444 break; 5445 5446 if (initialize_plane(dm, NULL, primary_planes + i, 5447 DRM_PLANE_TYPE_OVERLAY, plane)) { 5448 drm_err(adev_to_drm(adev), "KMS: Failed to initialize overlay plane\n"); 5449 goto fail; 5450 } 5451 } 5452 5453 for (i = 0; i < dm->dc->caps.max_streams; i++) 5454 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { 5455 drm_err(adev_to_drm(adev), "KMS: Failed to initialize crtc\n"); 5456 goto fail; 5457 } 5458 5459 /* Use Outbox interrupt */ 5460 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5461 case IP_VERSION(3, 0, 0): 5462 case IP_VERSION(3, 1, 2): 5463 case IP_VERSION(3, 1, 3): 5464 case IP_VERSION(3, 1, 4): 5465 case IP_VERSION(3, 1, 5): 5466 case IP_VERSION(3, 1, 6): 5467 case IP_VERSION(3, 2, 0): 5468 case IP_VERSION(3, 2, 1): 5469 case IP_VERSION(2, 1, 0): 5470 case IP_VERSION(3, 5, 0): 5471 case IP_VERSION(3, 5, 1): 5472 case IP_VERSION(3, 6, 0): 5473 case IP_VERSION(4, 0, 1): 5474 case IP_VERSION(4, 2, 0): 5475 if (register_outbox_irq_handlers(dm->adev)) { 5476 drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); 5477 goto fail; 5478 } 5479 break; 5480 default: 5481 drm_dbg_kms(adev_to_drm(adev), "Unsupported DCN IP version for outbox: 0x%X\n", 5482 amdgpu_ip_version(adev, DCE_HWIP, 0)); 5483 } 5484 5485 /* Determine whether to enable PSR support by default. */ 5486 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { 5487 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5488 case IP_VERSION(3, 1, 2): 5489 case IP_VERSION(3, 1, 3): 5490 case IP_VERSION(3, 1, 4): 5491 case IP_VERSION(3, 1, 5): 5492 case IP_VERSION(3, 1, 6): 5493 case IP_VERSION(3, 2, 0): 5494 case IP_VERSION(3, 2, 1): 5495 case IP_VERSION(3, 5, 0): 5496 case IP_VERSION(3, 5, 1): 5497 case IP_VERSION(3, 6, 0): 5498 case IP_VERSION(4, 0, 1): 5499 case IP_VERSION(4, 2, 0): 5500 psr_feature_enabled = true; 5501 break; 5502 default: 5503 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK; 5504 break; 5505 } 5506 } 5507 5508 /* Determine whether to enable Replay support by default. 
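 * As with PSR above, the IP versions listed below get Replay by
 * default; other ASICs only enable it when the user opts in through
 * the DC_REPLAY_MASK bit of the amdgpu.dcfeaturemask module parameter.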
*/ 5509 if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { 5510 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5511 case IP_VERSION(3, 1, 4): 5512 case IP_VERSION(3, 2, 0): 5513 case IP_VERSION(3, 2, 1): 5514 case IP_VERSION(3, 5, 0): 5515 case IP_VERSION(3, 5, 1): 5516 case IP_VERSION(3, 6, 0): 5517 replay_feature_enabled = true; 5518 break; 5519 5520 default: 5521 replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK; 5522 break; 5523 } 5524 } 5525 5526 if (link_cnt > MAX_LINKS) { 5527 drm_err(adev_to_drm(adev), 5528 "KMS: Cannot support more than %d display indexes\n", 5529 MAX_LINKS); 5530 goto fail; 5531 } 5532 5533 /* loops over all connectors on the board */ 5534 for (i = 0; i < link_cnt; i++) { 5535 struct dc_link *link = NULL; 5536 5537 link = dc_get_link_at_index(dm->dc, i); 5538 5539 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) { 5540 struct amdgpu_dm_wb_connector *wbcon = kzalloc_obj(*wbcon); 5541 5542 if (!wbcon) { 5543 drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n"); 5544 continue; 5545 } 5546 5547 if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) { 5548 drm_err(adev_to_drm(adev), "KMS: Failed to initialize writeback connector\n"); 5549 kfree(wbcon); 5550 continue; 5551 } 5552 5553 link->psr_settings.psr_feature_enabled = false; 5554 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; 5555 5556 continue; 5557 } 5558 5559 aconnector = kzalloc_obj(*aconnector); 5560 if (!aconnector) 5561 goto fail; 5562 5563 aencoder = kzalloc_obj(*aencoder); 5564 if (!aencoder) 5565 goto fail; 5566 5567 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { 5568 drm_err(adev_to_drm(adev), "KMS: Failed to initialize encoder\n"); 5569 goto fail; 5570 } 5571 5572 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { 5573 drm_err(adev_to_drm(adev), "KMS: Failed to initialize connector\n"); 5574 goto fail; 5575 } 5576 5577 if (dm->hpd_rx_offload_wq) 5578 dm->hpd_rx_offload_wq[aconnector->base.index].aconnector = 5579 aconnector; 5580 5581 if (!dc_link_detect_connection_type(link, &new_connection_type)) 5582 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); 5583 5584 if (aconnector->base.force && new_connection_type == dc_connection_none) { 5585 emulated_link_detect(link); 5586 amdgpu_dm_update_connector_after_detect(aconnector); 5587 } else { 5588 bool ret = false; 5589 5590 mutex_lock(&dm->dc_lock); 5591 dc_exit_ips_for_hw_access(dm->dc); 5592 ret = dc_link_detect(link, DETECT_REASON_BOOT); 5593 mutex_unlock(&dm->dc_lock); 5594 5595 if (ret) { 5596 amdgpu_dm_update_connector_after_detect(aconnector); 5597 setup_backlight_device(dm, aconnector); 5598 5599 /* Disable PSR if Replay can be enabled */ 5600 if (replay_feature_enabled) 5601 if (amdgpu_dm_set_replay_caps(link, aconnector)) 5602 psr_feature_enabled = false; 5603 5604 if (psr_feature_enabled) { 5605 amdgpu_dm_set_psr_caps(link); 5606 drm_info(adev_to_drm(adev), "%s: PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n", 5607 aconnector->base.name, 5608 link->psr_settings.psr_feature_enabled, 5609 link->psr_settings.psr_version, 5610 link->dpcd_caps.psr_info.psr_version, 5611 link->dpcd_caps.psr_info.psr_dpcd_caps.raw, 5612 link->dpcd_caps.psr_info.psr2_su_y_granularity_cap); 5613 } 5614 } 5615 } 5616 amdgpu_set_panel_orientation(&aconnector->base); 5617 } 5618 5619 /* Debug dump: list all DC links and their associated sinks after detection 5620 * is complete for all connectors. 
This provides a comprehensive view of the 5621 * final state without repeating the dump for each connector. 5622 */ 5623 amdgpu_dm_dump_links_and_sinks(adev); 5624 5625 /* Software is initialized. Now we can register interrupt handlers. */ 5626 switch (adev->asic_type) { 5627 #if defined(CONFIG_DRM_AMD_DC_SI) 5628 case CHIP_TAHITI: 5629 case CHIP_PITCAIRN: 5630 case CHIP_VERDE: 5631 case CHIP_OLAND: 5632 #endif 5633 case CHIP_BONAIRE: 5634 case CHIP_HAWAII: 5635 case CHIP_KAVERI: 5636 case CHIP_KABINI: 5637 case CHIP_MULLINS: 5638 case CHIP_TONGA: 5639 case CHIP_FIJI: 5640 case CHIP_CARRIZO: 5641 case CHIP_STONEY: 5642 case CHIP_POLARIS11: 5643 case CHIP_POLARIS10: 5644 case CHIP_POLARIS12: 5645 case CHIP_VEGAM: 5646 case CHIP_VEGA10: 5647 case CHIP_VEGA12: 5648 case CHIP_VEGA20: 5649 if (dce110_register_irq_handlers(dm->adev)) { 5650 drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); 5651 goto fail; 5652 } 5653 break; 5654 default: 5655 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5656 case IP_VERSION(1, 0, 0): 5657 case IP_VERSION(1, 0, 1): 5658 case IP_VERSION(2, 0, 2): 5659 case IP_VERSION(2, 0, 3): 5660 case IP_VERSION(2, 0, 0): 5661 case IP_VERSION(2, 1, 0): 5662 case IP_VERSION(3, 0, 0): 5663 case IP_VERSION(3, 0, 2): 5664 case IP_VERSION(3, 0, 3): 5665 case IP_VERSION(3, 0, 1): 5666 case IP_VERSION(3, 1, 2): 5667 case IP_VERSION(3, 1, 3): 5668 case IP_VERSION(3, 1, 4): 5669 case IP_VERSION(3, 1, 5): 5670 case IP_VERSION(3, 1, 6): 5671 case IP_VERSION(3, 2, 0): 5672 case IP_VERSION(3, 2, 1): 5673 case IP_VERSION(3, 5, 0): 5674 case IP_VERSION(3, 5, 1): 5675 case IP_VERSION(3, 6, 0): 5676 case IP_VERSION(4, 0, 1): 5677 case IP_VERSION(4, 2, 0): 5678 if (dcn10_register_irq_handlers(dm->adev)) { 5679 drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); 5680 goto fail; 5681 } 5682 break; 5683 default: 5684 drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%X\n", 5685 amdgpu_ip_version(adev, DCE_HWIP, 0)); 5686 goto fail; 5687 } 5688 break; 5689 } 5690 5691 return 0; 5692 fail: 5693 kfree(aencoder); 5694 kfree(aconnector); 5695 5696 return -EINVAL; 5697 } 5698 5699 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) 5700 { 5701 if (dm->atomic_obj.state) 5702 drm_atomic_private_obj_fini(&dm->atomic_obj); 5703 } 5704 5705 /****************************************************************************** 5706 * amdgpu_display_funcs functions 5707 *****************************************************************************/ 5708 5709 /* 5710 * dm_bandwidth_update - program display watermarks 5711 * 5712 * @adev: amdgpu_device pointer 5713 * 5714 * Calculate and program the display watermarks and line buffer allocation. 5715 */ 5716 static void dm_bandwidth_update(struct amdgpu_device *adev) 5717 { 5718 /* TODO: implement later */ 5719 } 5720 5721 static const struct amdgpu_display_funcs dm_display_funcs = { 5722 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ 5723 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ 5724 .backlight_set_level = NULL, /* never called for DC */ 5725 .backlight_get_level = NULL, /* never called for DC */ 5726 .hpd_sense = NULL,/* called unconditionally */ 5727 .hpd_set_polarity = NULL, /* called unconditionally */ 5728 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ 5729 .page_flip_get_scanoutpos = 5730 dm_crtc_get_scanoutpos,/* called unconditionally */ 5731 .add_encoder = NULL, /* VBIOS parsing. DAL does it. 
*/ 5732 .add_connector = NULL, /* VBIOS parsing. DAL does it. */ 5733 }; 5734 5735 #if defined(CONFIG_DEBUG_KERNEL_DC) 5736 5737 static ssize_t s3_debug_store(struct device *device, 5738 struct device_attribute *attr, 5739 const char *buf, 5740 size_t count) 5741 { 5742 int ret; 5743 int s3_state; 5744 struct drm_device *drm_dev = dev_get_drvdata(device); 5745 struct amdgpu_device *adev = drm_to_adev(drm_dev); 5746 struct amdgpu_ip_block *ip_block; 5747 5748 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE); 5749 if (!ip_block) 5750 return -EINVAL; 5751 5752 ret = kstrtoint(buf, 0, &s3_state); 5753 5754 if (ret == 0) { 5755 if (s3_state) { 5756 dm_resume(ip_block); 5757 drm_kms_helper_hotplug_event(adev_to_drm(adev)); 5758 } else 5759 dm_suspend(ip_block); 5760 } 5761 5762 return ret == 0 ? count : 0; 5763 } 5764 5765 DEVICE_ATTR_WO(s3_debug); 5766 5767 #endif 5768 5769 static int dm_init_microcode(struct amdgpu_device *adev) 5770 { 5771 char *fw_name_dmub; 5772 int r; 5773 5774 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5775 case IP_VERSION(2, 1, 0): 5776 fw_name_dmub = FIRMWARE_RENOIR_DMUB; 5777 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) 5778 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; 5779 break; 5780 case IP_VERSION(3, 0, 0): 5781 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0)) 5782 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; 5783 else 5784 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; 5785 break; 5786 case IP_VERSION(3, 0, 1): 5787 fw_name_dmub = FIRMWARE_VANGOGH_DMUB; 5788 break; 5789 case IP_VERSION(3, 0, 2): 5790 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; 5791 break; 5792 case IP_VERSION(3, 0, 3): 5793 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; 5794 break; 5795 case IP_VERSION(3, 1, 2): 5796 case IP_VERSION(3, 1, 3): 5797 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; 5798 break; 5799 case IP_VERSION(3, 1, 4): 5800 fw_name_dmub = FIRMWARE_DCN_314_DMUB; 5801 break; 5802 case IP_VERSION(3, 1, 5): 5803 fw_name_dmub = FIRMWARE_DCN_315_DMUB; 5804 break; 5805 case IP_VERSION(3, 1, 6): 5806 fw_name_dmub = FIRMWARE_DCN316_DMUB; 5807 break; 5808 case IP_VERSION(3, 2, 0): 5809 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; 5810 break; 5811 case IP_VERSION(3, 2, 1): 5812 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; 5813 break; 5814 case IP_VERSION(3, 5, 0): 5815 fw_name_dmub = FIRMWARE_DCN_35_DMUB; 5816 break; 5817 case IP_VERSION(3, 5, 1): 5818 fw_name_dmub = FIRMWARE_DCN_351_DMUB; 5819 break; 5820 case IP_VERSION(3, 6, 0): 5821 fw_name_dmub = FIRMWARE_DCN_36_DMUB; 5822 break; 5823 case IP_VERSION(4, 0, 1): 5824 fw_name_dmub = FIRMWARE_DCN_401_DMUB; 5825 break; 5826 case IP_VERSION(4, 2, 0): 5827 fw_name_dmub = FIRMWARE_DCN_42_DMUB; 5828 break; 5829 default: 5830 /* ASIC doesn't support DMUB. 
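 * Returning 0 rather than an error here is deliberate: ASICs without
 * a DMCUB simply skip the firmware request below, and the rest of
 * init proceeds without a DMUB service.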
*/ 5831 return 0; 5832 } 5833 r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, AMDGPU_UCODE_REQUIRED, 5834 "%s", fw_name_dmub); 5835 return r; 5836 } 5837 5838 static int dm_early_init(struct amdgpu_ip_block *ip_block) 5839 { 5840 struct amdgpu_device *adev = ip_block->adev; 5841 struct amdgpu_mode_info *mode_info = &adev->mode_info; 5842 struct atom_context *ctx = mode_info->atom_context; 5843 int index = GetIndexIntoMasterTable(DATA, Object_Header); 5844 u16 data_offset; 5845 5846 /* if there is no object header, skip DM */ 5847 if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { 5848 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 5849 drm_info(adev_to_drm(adev), "No object header, skipping DM\n"); 5850 return -ENOENT; 5851 } 5852 5853 switch (adev->asic_type) { 5854 #if defined(CONFIG_DRM_AMD_DC_SI) 5855 case CHIP_TAHITI: 5856 case CHIP_PITCAIRN: 5857 case CHIP_VERDE: 5858 adev->mode_info.num_crtc = 6; 5859 adev->mode_info.num_hpd = 6; 5860 adev->mode_info.num_dig = 6; 5861 break; 5862 case CHIP_OLAND: 5863 adev->mode_info.num_crtc = 2; 5864 adev->mode_info.num_hpd = 2; 5865 adev->mode_info.num_dig = 2; 5866 break; 5867 #endif 5868 case CHIP_BONAIRE: 5869 case CHIP_HAWAII: 5870 adev->mode_info.num_crtc = 6; 5871 adev->mode_info.num_hpd = 6; 5872 adev->mode_info.num_dig = 6; 5873 break; 5874 case CHIP_KAVERI: 5875 adev->mode_info.num_crtc = 4; 5876 adev->mode_info.num_hpd = 6; 5877 adev->mode_info.num_dig = 7; 5878 break; 5879 case CHIP_KABINI: 5880 case CHIP_MULLINS: 5881 adev->mode_info.num_crtc = 2; 5882 adev->mode_info.num_hpd = 6; 5883 adev->mode_info.num_dig = 6; 5884 break; 5885 case CHIP_FIJI: 5886 case CHIP_TONGA: 5887 adev->mode_info.num_crtc = 6; 5888 adev->mode_info.num_hpd = 6; 5889 adev->mode_info.num_dig = 7; 5890 break; 5891 case CHIP_CARRIZO: 5892 adev->mode_info.num_crtc = 3; 5893 adev->mode_info.num_hpd = 6; 5894 adev->mode_info.num_dig = 9; 5895 break; 5896 case CHIP_STONEY: 5897 adev->mode_info.num_crtc = 2; 5898 adev->mode_info.num_hpd = 6; 5899 adev->mode_info.num_dig = 9; 5900 break; 5901 case CHIP_POLARIS11: 5902 case CHIP_POLARIS12: 5903 adev->mode_info.num_crtc = 5; 5904 adev->mode_info.num_hpd = 5; 5905 adev->mode_info.num_dig = 5; 5906 break; 5907 case CHIP_POLARIS10: 5908 case CHIP_VEGAM: 5909 adev->mode_info.num_crtc = 6; 5910 adev->mode_info.num_hpd = 6; 5911 adev->mode_info.num_dig = 6; 5912 break; 5913 case CHIP_VEGA10: 5914 case CHIP_VEGA12: 5915 case CHIP_VEGA20: 5916 adev->mode_info.num_crtc = 6; 5917 adev->mode_info.num_hpd = 6; 5918 adev->mode_info.num_dig = 6; 5919 break; 5920 default: 5921 5922 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5923 case IP_VERSION(2, 0, 2): 5924 case IP_VERSION(3, 0, 0): 5925 adev->mode_info.num_crtc = 6; 5926 adev->mode_info.num_hpd = 6; 5927 adev->mode_info.num_dig = 6; 5928 break; 5929 case IP_VERSION(2, 0, 0): 5930 case IP_VERSION(3, 0, 2): 5931 adev->mode_info.num_crtc = 5; 5932 adev->mode_info.num_hpd = 5; 5933 adev->mode_info.num_dig = 5; 5934 break; 5935 case IP_VERSION(2, 0, 3): 5936 case IP_VERSION(3, 0, 3): 5937 adev->mode_info.num_crtc = 2; 5938 adev->mode_info.num_hpd = 2; 5939 adev->mode_info.num_dig = 2; 5940 break; 5941 case IP_VERSION(1, 0, 0): 5942 case IP_VERSION(1, 0, 1): 5943 case IP_VERSION(3, 0, 1): 5944 case IP_VERSION(2, 1, 0): 5945 case IP_VERSION(3, 1, 2): 5946 case IP_VERSION(3, 1, 3): 5947 case IP_VERSION(3, 1, 4): 5948 case IP_VERSION(3, 1, 5): 5949 case IP_VERSION(3, 1, 6): 5950 case IP_VERSION(3, 2, 0): 5951 case IP_VERSION(3, 2, 1): 5952 case 
IP_VERSION(3, 5, 0): 5953 case IP_VERSION(3, 5, 1): 5954 case IP_VERSION(3, 6, 0): 5955 case IP_VERSION(4, 0, 1): 5956 case IP_VERSION(4, 2, 0): 5957 adev->mode_info.num_crtc = 4; 5958 adev->mode_info.num_hpd = 4; 5959 adev->mode_info.num_dig = 4; 5960 break; 5961 default: 5962 drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%x\n", 5963 amdgpu_ip_version(adev, DCE_HWIP, 0)); 5964 return -EINVAL; 5965 } 5966 break; 5967 } 5968 5969 if (adev->mode_info.funcs == NULL) 5970 adev->mode_info.funcs = &dm_display_funcs; 5971 5972 /* 5973 * Note: Do NOT change adev->reg.audio_endpt.rreg and 5974 * adev->reg.audio_endpt.wreg because they are initialised in 5975 * amdgpu_device_init() 5976 */ 5977 #if defined(CONFIG_DEBUG_KERNEL_DC) 5978 device_create_file( 5979 adev_to_drm(adev)->dev, 5980 &dev_attr_s3_debug); 5981 #endif 5982 adev->dc_enabled = true; 5983 5984 return dm_init_microcode(adev); 5985 } 5986 5987 static bool modereset_required(struct drm_crtc_state *crtc_state) 5988 { 5989 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 5990 } 5991 5992 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 5993 { 5994 drm_encoder_cleanup(encoder); 5995 kfree(encoder); 5996 } 5997 5998 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 5999 .destroy = amdgpu_dm_encoder_destroy, 6000 }; 6001 6002 static int 6003 fill_plane_color_attributes(const struct drm_plane_state *plane_state, 6004 const enum surface_pixel_format format, 6005 enum dc_color_space *color_space) 6006 { 6007 bool full_range; 6008 6009 *color_space = COLOR_SPACE_SRGB; 6010 6011 /* Ignore properties when DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set */ 6012 if (plane_state->state && plane_state->state->plane_color_pipeline) 6013 return 0; 6014 6015 /* DRM color properties only affect non-RGB formats. 
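 * RGB formats keep the COLOR_SPACE_SRGB default set above. As an
 * example, an NV12 plane with color_encoding == DRM_COLOR_YCBCR_BT709
 * and color_range == DRM_COLOR_YCBCR_LIMITED_RANGE resolves to
 * COLOR_SPACE_YCBCR709_LIMITED in the switch below.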
*/ 6016 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 6017 return 0; 6018 6019 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); 6020 6021 switch (plane_state->color_encoding) { 6022 case DRM_COLOR_YCBCR_BT601: 6023 if (full_range) 6024 *color_space = COLOR_SPACE_YCBCR601; 6025 else 6026 *color_space = COLOR_SPACE_YCBCR601_LIMITED; 6027 break; 6028 6029 case DRM_COLOR_YCBCR_BT709: 6030 if (full_range) 6031 *color_space = COLOR_SPACE_YCBCR709; 6032 else 6033 *color_space = COLOR_SPACE_YCBCR709_LIMITED; 6034 break; 6035 6036 case DRM_COLOR_YCBCR_BT2020: 6037 if (full_range) 6038 *color_space = COLOR_SPACE_2020_YCBCR_FULL; 6039 else 6040 *color_space = COLOR_SPACE_2020_YCBCR_LIMITED; 6041 break; 6042 6043 default: 6044 return -EINVAL; 6045 } 6046 6047 return 0; 6048 } 6049 6050 static int 6051 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 6052 const struct drm_plane_state *plane_state, 6053 const u64 tiling_flags, 6054 struct dc_plane_info *plane_info, 6055 struct dc_plane_address *address, 6056 bool tmz_surface) 6057 { 6058 const struct drm_framebuffer *fb = plane_state->fb; 6059 const struct amdgpu_framebuffer *afb = 6060 to_amdgpu_framebuffer(plane_state->fb); 6061 int ret; 6062 6063 memset(plane_info, 0, sizeof(*plane_info)); 6064 6065 switch (fb->format->format) { 6066 case DRM_FORMAT_C8: 6067 plane_info->format = 6068 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; 6069 break; 6070 case DRM_FORMAT_RGB565: 6071 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; 6072 break; 6073 case DRM_FORMAT_XRGB8888: 6074 case DRM_FORMAT_ARGB8888: 6075 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 6076 break; 6077 case DRM_FORMAT_XRGB2101010: 6078 case DRM_FORMAT_ARGB2101010: 6079 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; 6080 break; 6081 case DRM_FORMAT_XBGR2101010: 6082 case DRM_FORMAT_ABGR2101010: 6083 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; 6084 break; 6085 case DRM_FORMAT_XBGR8888: 6086 case DRM_FORMAT_ABGR8888: 6087 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; 6088 break; 6089 case DRM_FORMAT_NV21: 6090 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; 6091 break; 6092 case DRM_FORMAT_NV12: 6093 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; 6094 break; 6095 case DRM_FORMAT_P010: 6096 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; 6097 break; 6098 case DRM_FORMAT_XRGB16161616F: 6099 case DRM_FORMAT_ARGB16161616F: 6100 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; 6101 break; 6102 case DRM_FORMAT_XBGR16161616F: 6103 case DRM_FORMAT_ABGR16161616F: 6104 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; 6105 break; 6106 case DRM_FORMAT_XRGB16161616: 6107 case DRM_FORMAT_ARGB16161616: 6108 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; 6109 break; 6110 case DRM_FORMAT_XBGR16161616: 6111 case DRM_FORMAT_ABGR16161616: 6112 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; 6113 break; 6114 default: 6115 drm_err(adev_to_drm(adev), 6116 "Unsupported screen format %p4cc\n", 6117 &fb->format->format); 6118 return -EINVAL; 6119 } 6120 6121 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 6122 case DRM_MODE_ROTATE_0: 6123 plane_info->rotation = ROTATION_ANGLE_0; 6124 break; 6125 case DRM_MODE_ROTATE_90: 6126 plane_info->rotation = ROTATION_ANGLE_90; 6127 break; 6128 case DRM_MODE_ROTATE_180: 6129 plane_info->rotation = ROTATION_ANGLE_180; 6130 break; 6131 case DRM_MODE_ROTATE_270: 6132 plane_info->rotation = 
ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = plane_state->normalized_zpos;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
							   plane_info->rotation, tiling_flags,
							   &plane_info->tiling_info,
							   &plane_info->plane_size,
							   &plane_info->dcc, address,
							   tmz_surface);
	if (ret)
		return ret;

	amdgpu_dm_plane_fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;

	ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  afb->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  afb->tmz_surface);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index;
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
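	 *
	 * A LUT or CTM attached to the CRTC therefore has to be re-applied
	 * to the freshly built dc_plane_state on every commit, not only when
	 * the color properties change.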
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state,
						plane_state,
						dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static inline void fill_dc_dirty_rect(struct drm_plane *plane,
				      struct rect *dirty_rect, s32 x,
				      s32 y, s32 width, s32 height,
				      int *i, bool ffu)
{
	WARN_ON(*i >= DC_MAX_DIRTY_RECTS);

	dirty_rect->x = x;
	dirty_rect->y = y;
	dirty_rect->width = width;
	dirty_rect->height = height;

	if (ffu)
		drm_dbg(plane->dev,
			"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
			plane->base.id, width, height);
	else
		drm_dbg(plane->dev,
			"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
			plane->base.id, x, y, width, height);

	(*i)++;
}

/**
 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
 *
 * @plane: DRM plane containing dirty regions that need to be flushed to the
 *         eDP remote fb
 * @old_plane_state: Old state of @plane
 * @new_plane_state: New state of @plane
 * @crtc_state: New state of CRTC connected to the @plane
 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
 * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update
 *             (PSR SU) is enabled. If PSR SU is enabled and damage clips are
 *             available, only the regions of the screen that have changed
 *             will be updated. If PSR SU is not enabled, or if damage clips
 *             are not available, the entire screen will be updated.
 * @dirty_regions_changed: dirty regions changed
 *
 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
 * (referred to as "damage clips" in DRM nomenclature) that require updating on
 * the eDP remote buffer. The responsibility of specifying the dirty regions is
 * amdgpu_dm's.
 *
 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
 * plane with regions that require flushing to the eDP remote buffer. In
 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
 * implicitly provide damage clips without any client support via the plane
 * bounds.
 */
static void fill_dc_dirty_rects(struct drm_plane *plane,
				struct drm_plane_state *old_plane_state,
				struct drm_plane_state *new_plane_state,
				struct drm_crtc_state *crtc_state,
				struct dc_flip_addrs *flip_addrs,
				bool is_psr_su,
				bool *dirty_regions_changed)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct rect *dirty_rects = flip_addrs->dirty_rects;
	u32 num_clips;
	struct drm_mode_rect *clips;
	bool bb_changed;
	bool fb_changed;
	u32 i = 0;

	*dirty_regions_changed = false;

	/*
	 * Cursor plane has its own dirty rect update interface.
See 6293 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data 6294 */ 6295 if (plane->type == DRM_PLANE_TYPE_CURSOR) 6296 return; 6297 6298 if (new_plane_state->rotation != DRM_MODE_ROTATE_0) 6299 goto ffu; 6300 6301 num_clips = drm_plane_get_damage_clips_count(new_plane_state); 6302 clips = drm_plane_get_damage_clips(new_plane_state); 6303 6304 if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 && 6305 is_psr_su))) 6306 goto ffu; 6307 6308 if (!dm_crtc_state->mpo_requested) { 6309 if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS) 6310 goto ffu; 6311 6312 for (; flip_addrs->dirty_rect_count < num_clips; clips++) 6313 fill_dc_dirty_rect(new_plane_state->plane, 6314 &dirty_rects[flip_addrs->dirty_rect_count], 6315 clips->x1, clips->y1, 6316 clips->x2 - clips->x1, clips->y2 - clips->y1, 6317 &flip_addrs->dirty_rect_count, 6318 false); 6319 return; 6320 } 6321 6322 /* 6323 * MPO is requested. Add entire plane bounding box to dirty rects if 6324 * flipped to or damaged. 6325 * 6326 * If plane is moved or resized, also add old bounding box to dirty 6327 * rects. 6328 */ 6329 fb_changed = old_plane_state->fb->base.id != 6330 new_plane_state->fb->base.id; 6331 bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x || 6332 old_plane_state->crtc_y != new_plane_state->crtc_y || 6333 old_plane_state->crtc_w != new_plane_state->crtc_w || 6334 old_plane_state->crtc_h != new_plane_state->crtc_h); 6335 6336 drm_dbg(plane->dev, 6337 "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n", 6338 new_plane_state->plane->base.id, 6339 bb_changed, fb_changed, num_clips); 6340 6341 *dirty_regions_changed = bb_changed; 6342 6343 if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS) 6344 goto ffu; 6345 6346 if (bb_changed) { 6347 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 6348 new_plane_state->crtc_x, 6349 new_plane_state->crtc_y, 6350 new_plane_state->crtc_w, 6351 new_plane_state->crtc_h, &i, false); 6352 6353 /* Add old plane bounding-box if plane is moved or resized */ 6354 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 6355 old_plane_state->crtc_x, 6356 old_plane_state->crtc_y, 6357 old_plane_state->crtc_w, 6358 old_plane_state->crtc_h, &i, false); 6359 } 6360 6361 if (num_clips) { 6362 for (; i < num_clips; clips++) 6363 fill_dc_dirty_rect(new_plane_state->plane, 6364 &dirty_rects[i], clips->x1, 6365 clips->y1, clips->x2 - clips->x1, 6366 clips->y2 - clips->y1, &i, false); 6367 } else if (fb_changed && !bb_changed) { 6368 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 6369 new_plane_state->crtc_x, 6370 new_plane_state->crtc_y, 6371 new_plane_state->crtc_w, 6372 new_plane_state->crtc_h, &i, false); 6373 } 6374 6375 flip_addrs->dirty_rect_count = i; 6376 return; 6377 6378 ffu: 6379 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0, 6380 dm_crtc_state->base.mode.crtc_hdisplay, 6381 dm_crtc_state->base.mode.crtc_vdisplay, 6382 &flip_addrs->dirty_rect_count, true); 6383 } 6384 6385 static void update_stream_scaling_settings(struct drm_device *dev, 6386 const struct drm_display_mode *mode, 6387 const struct dm_connector_state *dm_state, 6388 struct dc_stream_state *stream) 6389 { 6390 enum amdgpu_rmx_type rmx_type; 6391 6392 struct rect src = { 0 }; /* viewport in composition space*/ 6393 struct rect dst = { 0 }; /* stream addressable area */ 6394 6395 /* no mode. 
nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
			    src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
					    dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
					     dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	drm_dbg_kms(dev, "Destination Rectangle x:%d y:%d width:%d height:%d\n",
		    dst.x, dst.y, dst.width, dst.height);
}

static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	u8 bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (u8)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/*
	 * 1-1 mapping, since both enums follow the HDMI spec.
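	 * For example, HDMI_PICTURE_ASPECT_4_3 and HDMI_PICTURE_ASPECT_16_9
	 * on the DRM side cast directly to ASPECT_RATIO_4_3 and
	 * ASPECT_RATIO_16_9 in DC, so the cast below is sufficient.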
*/ 6507 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio; 6508 } 6509 6510 static enum dc_color_space 6511 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing, 6512 const struct drm_connector_state *connector_state) 6513 { 6514 enum dc_color_space color_space = COLOR_SPACE_SRGB; 6515 6516 switch (connector_state->colorspace) { 6517 case DRM_MODE_COLORIMETRY_BT601_YCC: 6518 if (dc_crtc_timing->flags.Y_ONLY) 6519 color_space = COLOR_SPACE_YCBCR601_LIMITED; 6520 else 6521 color_space = COLOR_SPACE_YCBCR601; 6522 break; 6523 case DRM_MODE_COLORIMETRY_BT709_YCC: 6524 if (dc_crtc_timing->flags.Y_ONLY) 6525 color_space = COLOR_SPACE_YCBCR709_LIMITED; 6526 else 6527 color_space = COLOR_SPACE_YCBCR709; 6528 break; 6529 case DRM_MODE_COLORIMETRY_OPRGB: 6530 color_space = COLOR_SPACE_ADOBERGB; 6531 break; 6532 case DRM_MODE_COLORIMETRY_BT2020_RGB: 6533 case DRM_MODE_COLORIMETRY_BT2020_YCC: 6534 if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) 6535 color_space = COLOR_SPACE_2020_RGB_FULLRANGE; 6536 else 6537 color_space = COLOR_SPACE_2020_YCBCR_LIMITED; 6538 break; 6539 case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601 6540 default: 6541 if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) { 6542 color_space = COLOR_SPACE_SRGB; 6543 if (connector_state->hdmi.broadcast_rgb == DRM_HDMI_BROADCAST_RGB_LIMITED) 6544 color_space = COLOR_SPACE_SRGB_LIMITED; 6545 /* 6546 * 27030khz is the separation point between HDTV and SDTV 6547 * according to HDMI spec, we use YCbCr709 and YCbCr601 6548 * respectively 6549 */ 6550 } else if (dc_crtc_timing->pix_clk_100hz > 270300) { 6551 if (dc_crtc_timing->flags.Y_ONLY) 6552 color_space = 6553 COLOR_SPACE_YCBCR709_LIMITED; 6554 else 6555 color_space = COLOR_SPACE_YCBCR709; 6556 } else { 6557 if (dc_crtc_timing->flags.Y_ONLY) 6558 color_space = 6559 COLOR_SPACE_YCBCR601_LIMITED; 6560 else 6561 color_space = COLOR_SPACE_YCBCR601; 6562 } 6563 break; 6564 } 6565 6566 return color_space; 6567 } 6568 6569 static enum display_content_type 6570 get_output_content_type(const struct drm_connector_state *connector_state) 6571 { 6572 switch (connector_state->content_type) { 6573 default: 6574 case DRM_MODE_CONTENT_TYPE_NO_DATA: 6575 return DISPLAY_CONTENT_TYPE_NO_DATA; 6576 case DRM_MODE_CONTENT_TYPE_GRAPHICS: 6577 return DISPLAY_CONTENT_TYPE_GRAPHICS; 6578 case DRM_MODE_CONTENT_TYPE_PHOTO: 6579 return DISPLAY_CONTENT_TYPE_PHOTO; 6580 case DRM_MODE_CONTENT_TYPE_CINEMA: 6581 return DISPLAY_CONTENT_TYPE_CINEMA; 6582 case DRM_MODE_CONTENT_TYPE_GAME: 6583 return DISPLAY_CONTENT_TYPE_GAME; 6584 } 6585 } 6586 6587 static bool adjust_colour_depth_from_display_info( 6588 struct dc_crtc_timing *timing_out, 6589 const struct drm_display_info *info) 6590 { 6591 enum dc_color_depth depth = timing_out->display_color_depth; 6592 int normalized_clk; 6593 6594 do { 6595 normalized_clk = timing_out->pix_clk_100hz / 10; 6596 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ 6597 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) 6598 normalized_clk /= 2; 6599 /* Adjusting pix clock following on HDMI spec based on colour depth */ 6600 switch (depth) { 6601 case COLOR_DEPTH_888: 6602 break; 6603 case COLOR_DEPTH_101010: 6604 normalized_clk = (normalized_clk * 30) / 24; 6605 break; 6606 case COLOR_DEPTH_121212: 6607 normalized_clk = (normalized_clk * 36) / 24; 6608 break; 6609 case COLOR_DEPTH_161616: 6610 normalized_clk = (normalized_clk * 48) / 24; 6611 break; 6612 default: 6613 /* The above depths are the only ones valid for HDMI. 
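 * Worked example: 4k60 RGB 4:4:4 has a normalized clock of 594000 kHz;
 * at 12 bpc the loop above scales it to 594000 * 36 / 24 = 891000 kHz
 * and at 10 bpc to 742500 kHz, both above the 600000 kHz TMDS limit of
 * an HDMI 2.0 sink, so the loop settles on 8 bpc (594000 kHz).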
*/ 6614 return false; 6615 } 6616 if (normalized_clk <= info->max_tmds_clock) { 6617 timing_out->display_color_depth = depth; 6618 return true; 6619 } 6620 } while (--depth > COLOR_DEPTH_666); 6621 return false; 6622 } 6623 6624 static void fill_stream_properties_from_drm_display_mode( 6625 struct dc_stream_state *stream, 6626 const struct drm_display_mode *mode_in, 6627 const struct drm_connector *connector, 6628 const struct drm_connector_state *connector_state, 6629 const struct dc_stream_state *old_stream, 6630 int requested_bpc) 6631 { 6632 struct dc_crtc_timing *timing_out = &stream->timing; 6633 const struct drm_display_info *info = &connector->display_info; 6634 struct amdgpu_dm_connector *aconnector = NULL; 6635 struct hdmi_vendor_infoframe hv_frame; 6636 struct hdmi_avi_infoframe avi_frame; 6637 ssize_t err; 6638 6639 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 6640 aconnector = to_amdgpu_dm_connector(connector); 6641 6642 memset(&hv_frame, 0, sizeof(hv_frame)); 6643 memset(&avi_frame, 0, sizeof(avi_frame)); 6644 6645 timing_out->h_border_left = 0; 6646 timing_out->h_border_right = 0; 6647 timing_out->v_border_top = 0; 6648 timing_out->v_border_bottom = 0; 6649 /* TODO: un-hardcode */ 6650 if (drm_mode_is_420_only(info, mode_in) 6651 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6652 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6653 else if (drm_mode_is_420_also(info, mode_in) 6654 && aconnector 6655 && aconnector->force_yuv420_output) 6656 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6657 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR422) 6658 && aconnector 6659 && aconnector->force_yuv422_output) 6660 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR422; 6661 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) 6662 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6663 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; 6664 else 6665 timing_out->pixel_encoding = PIXEL_ENCODING_RGB; 6666 6667 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; 6668 timing_out->display_color_depth = convert_color_depth_from_display_info( 6669 connector, 6670 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420), 6671 requested_bpc); 6672 timing_out->scan_type = SCANNING_TYPE_NODATA; 6673 timing_out->hdmi_vic = 0; 6674 6675 if (old_stream) { 6676 timing_out->vic = old_stream->timing.vic; 6677 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; 6678 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; 6679 } else { 6680 timing_out->vic = drm_match_cea_mode(mode_in); 6681 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) 6682 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; 6683 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) 6684 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; 6685 } 6686 6687 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 6688 err = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, 6689 (struct drm_connector *)connector, 6690 mode_in); 6691 if (err < 0) 6692 drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd\n", 6693 connector->name, err); 6694 timing_out->vic = avi_frame.video_code; 6695 err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, 6696 (struct drm_connector *)connector, 6697 mode_in); 6698 if (err < 0) 6699 drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd\n", 6700 connector->name, err); 6701 timing_out->hdmi_vic = hv_frame.vic; 6702 } 
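
	/*
	 * FreeSync video modes below are synthesized from the panel's
	 * preferred mode, so take the timings straight from the mode itself.
	 * For everything else use the crtc_* variants, which hold the
	 * timings actually programmed after CRTC adjustments.
	 */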
6703 6704 if (aconnector && is_freesync_video_mode(mode_in, aconnector)) { 6705 timing_out->h_addressable = mode_in->hdisplay; 6706 timing_out->h_total = mode_in->htotal; 6707 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; 6708 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; 6709 timing_out->v_total = mode_in->vtotal; 6710 timing_out->v_addressable = mode_in->vdisplay; 6711 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; 6712 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; 6713 timing_out->pix_clk_100hz = mode_in->clock * 10; 6714 } else { 6715 timing_out->h_addressable = mode_in->crtc_hdisplay; 6716 timing_out->h_total = mode_in->crtc_htotal; 6717 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; 6718 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; 6719 timing_out->v_total = mode_in->crtc_vtotal; 6720 timing_out->v_addressable = mode_in->crtc_vdisplay; 6721 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 6722 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; 6723 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; 6724 } 6725 6726 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 6727 6728 stream->out_transfer_func.type = TF_TYPE_PREDEFINED; 6729 stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB; 6730 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 6731 if (!adjust_colour_depth_from_display_info(timing_out, info) && 6732 drm_mode_is_420_also(info, mode_in) && 6733 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { 6734 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6735 adjust_colour_depth_from_display_info(timing_out, info); 6736 } 6737 } 6738 6739 stream->output_color_space = get_output_color_space(timing_out, connector_state); 6740 stream->content_type = get_output_content_type(connector_state); 6741 } 6742 6743 static void fill_audio_info(struct audio_info *audio_info, 6744 const struct drm_connector *drm_connector, 6745 const struct dc_sink *dc_sink) 6746 { 6747 int i = 0; 6748 int cea_revision = 0; 6749 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; 6750 6751 audio_info->manufacture_id = edid_caps->manufacturer_id; 6752 audio_info->product_id = edid_caps->product_id; 6753 6754 cea_revision = drm_connector->display_info.cea_rev; 6755 6756 strscpy(audio_info->display_name, 6757 edid_caps->display_name, 6758 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 6759 6760 if (cea_revision >= 3) { 6761 audio_info->mode_count = edid_caps->audio_mode_count; 6762 6763 for (i = 0; i < audio_info->mode_count; ++i) { 6764 audio_info->modes[i].format_code = 6765 (enum audio_format_code) 6766 (edid_caps->audio_modes[i].format_code); 6767 audio_info->modes[i].channel_count = 6768 edid_caps->audio_modes[i].channel_count; 6769 audio_info->modes[i].sample_rates.all = 6770 edid_caps->audio_modes[i].sample_rate; 6771 audio_info->modes[i].sample_size = 6772 edid_caps->audio_modes[i].sample_size; 6773 } 6774 } 6775 6776 audio_info->flags.all = edid_caps->speaker_flags; 6777 6778 /* TODO: We only check for the progressive mode, check for interlace mode too */ 6779 if (drm_connector->latency_present[0]) { 6780 audio_info->video_latency = drm_connector->video_latency[0]; 6781 audio_info->audio_latency = drm_connector->audio_latency[0]; 6782 } 6783 6784 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ 6785 6786 } 6787 6788 static void 6789 
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, 6790 struct drm_display_mode *dst_mode) 6791 { 6792 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; 6793 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; 6794 dst_mode->crtc_clock = src_mode->crtc_clock; 6795 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; 6796 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; 6797 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; 6798 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; 6799 dst_mode->crtc_htotal = src_mode->crtc_htotal; 6800 dst_mode->crtc_hskew = src_mode->crtc_hskew; 6801 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; 6802 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end; 6803 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; 6804 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; 6805 dst_mode->crtc_vtotal = src_mode->crtc_vtotal; 6806 } 6807 6808 static void 6809 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, 6810 const struct drm_display_mode *native_mode, 6811 bool scale_enabled) 6812 { 6813 if (scale_enabled || ( 6814 native_mode->clock == drm_mode->clock && 6815 native_mode->htotal == drm_mode->htotal && 6816 native_mode->vtotal == drm_mode->vtotal)) { 6817 if (native_mode->crtc_clock) 6818 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 6819 } else { 6820 /* no scaling nor amdgpu inserted, no need to patch */ 6821 } 6822 } 6823 6824 static struct dc_sink * 6825 create_fake_sink(struct drm_device *dev, struct dc_link *link) 6826 { 6827 struct dc_sink_init_data sink_init_data = { 0 }; 6828 struct dc_sink *sink = NULL; 6829 6830 sink_init_data.link = link; 6831 sink_init_data.sink_signal = link->connector_signal; 6832 6833 sink = dc_sink_create(&sink_init_data); 6834 if (!sink) { 6835 drm_err(dev, "Failed to create sink!\n"); 6836 return NULL; 6837 } 6838 sink->sink_signal = SIGNAL_TYPE_VIRTUAL; 6839 6840 return sink; 6841 } 6842 6843 static void set_multisync_trigger_params( 6844 struct dc_stream_state *stream) 6845 { 6846 struct dc_stream_state *master = NULL; 6847 6848 if (stream->triggered_crtc_reset.enabled) { 6849 master = stream->triggered_crtc_reset.event_source; 6850 stream->triggered_crtc_reset.event = 6851 master->timing.flags.VSYNC_POSITIVE_POLARITY ? 
			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}

/**
 * DOC: FreeSync Video
 *
 * When a userspace application wants to play a video, the content follows a
 * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
 *
 * - TV/NTSC (23.976 FPS)
 * - Cinema (24 FPS)
 * - TV/PAL (25 FPS)
 * - TV/NTSC (29.97 FPS)
 * - TV/NTSC (30 FPS)
 * - Cinema HFR (48 FPS)
 * - TV/PAL (50 FPS)
 * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96 FPS)
 *
 * The list of standard video formats is not huge and can be added to the
 * connector's modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch will happen seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid blinking during the
 * transition. For example, the video player can change the modesetting from
 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
 * causing any display blink. This same concept can be applied to a mode
 * setting change.
 */
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			      bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
6946 &aconnector->base.probed_modes : 6947 &aconnector->base.modes; 6948 6949 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 6950 return NULL; 6951 6952 if (aconnector->freesync_vid_base.clock != 0) 6953 return &aconnector->freesync_vid_base; 6954 6955 /* Find the preferred mode */ 6956 list_for_each_entry(m, list_head, head) { 6957 if (m->type & DRM_MODE_TYPE_PREFERRED) { 6958 m_pref = m; 6959 break; 6960 } 6961 } 6962 6963 if (!m_pref) { 6964 /* Probably an EDID with no preferred mode. Fallback to first entry */ 6965 m_pref = list_first_entry_or_null( 6966 &aconnector->base.modes, struct drm_display_mode, head); 6967 if (!m_pref) { 6968 drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n"); 6969 return NULL; 6970 } 6971 } 6972 6973 highest_refresh = drm_mode_vrefresh(m_pref); 6974 6975 /* 6976 * Find the mode with highest refresh rate with same resolution. 6977 * For some monitors, preferred mode is not the mode with highest 6978 * supported refresh rate. 6979 */ 6980 list_for_each_entry(m, list_head, head) { 6981 current_refresh = drm_mode_vrefresh(m); 6982 6983 if (m->hdisplay == m_pref->hdisplay && 6984 m->vdisplay == m_pref->vdisplay && 6985 highest_refresh < current_refresh) { 6986 highest_refresh = current_refresh; 6987 m_pref = m; 6988 } 6989 } 6990 6991 drm_mode_copy(&aconnector->freesync_vid_base, m_pref); 6992 return m_pref; 6993 } 6994 6995 static bool is_freesync_video_mode(const struct drm_display_mode *mode, 6996 struct amdgpu_dm_connector *aconnector) 6997 { 6998 struct drm_display_mode *high_mode; 6999 int timing_diff; 7000 7001 high_mode = get_highest_refresh_rate_mode(aconnector, false); 7002 if (!high_mode || !mode) 7003 return false; 7004 7005 timing_diff = high_mode->vtotal - mode->vtotal; 7006 7007 if (high_mode->clock == 0 || high_mode->clock != mode->clock || 7008 high_mode->hdisplay != mode->hdisplay || 7009 high_mode->vdisplay != mode->vdisplay || 7010 high_mode->hsync_start != mode->hsync_start || 7011 high_mode->hsync_end != mode->hsync_end || 7012 high_mode->htotal != mode->htotal || 7013 high_mode->hskew != mode->hskew || 7014 high_mode->vscan != mode->vscan || 7015 high_mode->vsync_start - mode->vsync_start != timing_diff || 7016 high_mode->vsync_end - mode->vsync_end != timing_diff) 7017 return false; 7018 else 7019 return true; 7020 } 7021 7022 #if defined(CONFIG_DRM_AMD_DC_FP) 7023 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, 7024 struct dc_sink *sink, struct dc_stream_state *stream, 7025 struct dsc_dec_dpcd_caps *dsc_caps) 7026 { 7027 stream->timing.flags.DSC = 0; 7028 dsc_caps->is_dsc_supported = false; 7029 7030 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || 7031 sink->sink_signal == SIGNAL_TYPE_EDP)) { 7032 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || 7033 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) 7034 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, 7035 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 7036 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, 7037 dsc_caps); 7038 } 7039 } 7040 7041 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, 7042 struct dc_sink *sink, struct dc_stream_state *stream, 7043 struct dsc_dec_dpcd_caps *dsc_caps, 7044 uint32_t max_dsc_target_bpp_limit_override) 7045 { 7046 const struct dc_link_settings *verified_link_cap = NULL; 7047 u32 link_bw_in_kbps; 7048 u32 edp_min_bpp_x16, edp_max_bpp_x16; 7049 struct dc *dc = 
sink->ctx->dc; 7050 struct dc_dsc_bw_range bw_range = {0}; 7051 struct dc_dsc_config dsc_cfg = {0}; 7052 struct dc_dsc_config_options dsc_options = {0}; 7053 7054 dc_dsc_get_default_config_option(dc, &dsc_options); 7055 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 7056 7057 verified_link_cap = dc_link_get_link_cap(stream->link); 7058 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); 7059 edp_min_bpp_x16 = 8 * 16; 7060 edp_max_bpp_x16 = 8 * 16; 7061 7062 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) 7063 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; 7064 7065 if (edp_max_bpp_x16 < edp_min_bpp_x16) 7066 edp_min_bpp_x16 = edp_max_bpp_x16; 7067 7068 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], 7069 dc->debug.dsc_min_slice_height_override, 7070 edp_min_bpp_x16, edp_max_bpp_x16, 7071 dsc_caps, 7072 &stream->timing, 7073 dc_link_get_highest_encoding_format(aconnector->dc_link), 7074 &bw_range)) { 7075 7076 if (bw_range.max_kbps < link_bw_in_kbps) { 7077 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 7078 dsc_caps, 7079 &dsc_options, 7080 0, 7081 &stream->timing, 7082 dc_link_get_highest_encoding_format(aconnector->dc_link), 7083 &dsc_cfg)) { 7084 stream->timing.dsc_cfg = dsc_cfg; 7085 stream->timing.flags.DSC = 1; 7086 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; 7087 } 7088 return; 7089 } 7090 } 7091 7092 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 7093 dsc_caps, 7094 &dsc_options, 7095 link_bw_in_kbps, 7096 &stream->timing, 7097 dc_link_get_highest_encoding_format(aconnector->dc_link), 7098 &dsc_cfg)) { 7099 stream->timing.dsc_cfg = dsc_cfg; 7100 stream->timing.flags.DSC = 1; 7101 } 7102 } 7103 7104 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, 7105 struct dc_sink *sink, struct dc_stream_state *stream, 7106 struct dsc_dec_dpcd_caps *dsc_caps) 7107 { 7108 struct drm_connector *drm_connector = &aconnector->base; 7109 u32 link_bandwidth_kbps; 7110 struct dc *dc = sink->ctx->dc; 7111 u32 max_supported_bw_in_kbps, timing_bw_in_kbps; 7112 u32 dsc_max_supported_bw_in_kbps; 7113 u32 max_dsc_target_bpp_limit_override = 7114 drm_connector->display_info.max_dsc_bpp; 7115 struct dc_dsc_config_options dsc_options = {0}; 7116 7117 dc_dsc_get_default_config_option(dc, &dsc_options); 7118 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 7119 7120 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, 7121 dc_link_get_link_cap(aconnector->dc_link)); 7122 7123 /* Set DSC policy according to dsc_clock_en */ 7124 dc_dsc_policy_set_enable_dsc_when_not_needed( 7125 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); 7126 7127 if (sink->sink_signal == SIGNAL_TYPE_EDP && 7128 !aconnector->dc_link->panel_config.dsc.disable_dsc_edp && 7129 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { 7130 7131 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); 7132 7133 } else if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { 7134 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { 7135 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 7136 dsc_caps, 7137 &dsc_options, 7138 link_bandwidth_kbps, 7139 &stream->timing, 7140 dc_link_get_highest_encoding_format(aconnector->dc_link), 7141 &stream->timing.dsc_cfg)) { 7142 stream->timing.flags.DSC = 1; 7143 drm_dbg_driver(drm_connector->dev, "%s: 
SST_DSC [%s] DSC is selected from SST RX\n", 7144 __func__, drm_connector->name); 7145 } 7146 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { 7147 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, 7148 dc_link_get_highest_encoding_format(aconnector->dc_link)); 7149 max_supported_bw_in_kbps = link_bandwidth_kbps; 7150 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; 7151 7152 if (timing_bw_in_kbps > max_supported_bw_in_kbps && 7153 max_supported_bw_in_kbps > 0 && 7154 dsc_max_supported_bw_in_kbps > 0) 7155 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 7156 dsc_caps, 7157 &dsc_options, 7158 dsc_max_supported_bw_in_kbps, 7159 &stream->timing, 7160 dc_link_get_highest_encoding_format(aconnector->dc_link), 7161 &stream->timing.dsc_cfg)) { 7162 stream->timing.flags.DSC = 1; 7163 drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n", 7164 __func__, drm_connector->name); 7165 } 7166 } 7167 } 7168 7169 /* Overwrite the stream flag if DSC is enabled through debugfs */ 7170 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) 7171 stream->timing.flags.DSC = 1; 7172 7173 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h) 7174 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; 7175 7176 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) 7177 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; 7178 7179 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) 7180 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; 7181 } 7182 #endif 7183 7184 static struct dc_stream_state * 7185 create_stream_for_sink(struct drm_connector *connector, 7186 const struct drm_display_mode *drm_mode, 7187 const struct dm_connector_state *dm_state, 7188 const struct dc_stream_state *old_stream, 7189 int requested_bpc) 7190 { 7191 struct drm_device *dev = connector->dev; 7192 struct amdgpu_dm_connector *aconnector = NULL; 7193 struct drm_display_mode *preferred_mode = NULL; 7194 const struct drm_connector_state *con_state = &dm_state->base; 7195 struct dc_stream_state *stream = NULL; 7196 struct drm_display_mode mode; 7197 struct drm_display_mode saved_mode; 7198 struct drm_display_mode *freesync_mode = NULL; 7199 bool native_mode_found = false; 7200 bool recalculate_timing = false; 7201 bool scale = dm_state->scaling != RMX_OFF; 7202 int mode_refresh; 7203 int preferred_refresh = 0; 7204 enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; 7205 #if defined(CONFIG_DRM_AMD_DC_FP) 7206 struct dsc_dec_dpcd_caps dsc_caps; 7207 #endif 7208 struct dc_link *link = NULL; 7209 struct dc_sink *sink = NULL; 7210 7211 drm_mode_init(&mode, drm_mode); 7212 memset(&saved_mode, 0, sizeof(saved_mode)); 7213 7214 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) { 7215 aconnector = NULL; 7216 aconnector = to_amdgpu_dm_connector(connector); 7217 link = aconnector->dc_link; 7218 } else { 7219 struct drm_writeback_connector *wbcon = NULL; 7220 struct amdgpu_dm_wb_connector *dm_wbcon = NULL; 7221 7222 wbcon = drm_connector_to_writeback(connector); 7223 dm_wbcon = to_amdgpu_dm_wb_connector(wbcon); 7224 link = dm_wbcon->link; 7225 } 7226 7227 if (!aconnector || !aconnector->dc_sink) { 7228 sink = create_fake_sink(dev, link); 7229 if (!sink) 7230 return stream; 7231 7232 } else { 7233 sink = aconnector->dc_sink; 7234 dc_sink_retain(sink); 
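/* Balanced by the dc_sink_release() at the finish: label below. */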
7235 } 7236 7237 stream = dc_create_stream_for_sink(sink); 7238 7239 if (stream == NULL) { 7240 drm_err(dev, "Failed to create stream for sink!\n"); 7241 goto finish; 7242 } 7243 7244 /* We leave this NULL for writeback connectors */ 7245 stream->dm_stream_context = aconnector; 7246 7247 stream->timing.flags.LTE_340MCSC_SCRAMBLE = 7248 connector->display_info.hdmi.scdc.scrambling.low_rates; 7249 7250 list_for_each_entry(preferred_mode, &connector->modes, head) { 7251 /* Search for preferred mode */ 7252 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { 7253 native_mode_found = true; 7254 break; 7255 } 7256 } 7257 if (!native_mode_found) 7258 preferred_mode = list_first_entry_or_null( 7259 &connector->modes, 7260 struct drm_display_mode, 7261 head); 7262 7263 mode_refresh = drm_mode_vrefresh(&mode); 7264
7265 if (preferred_mode == NULL) { 7266 /* 7267 * This may not be an error; the use case is when we have no 7268 * usermode calls to reset and set mode upon hotplug. In this 7269 * case, we call set mode ourselves to restore the previous mode 7270 * and the modelist may not be filled in time. 7271 */ 7272 drm_dbg_driver(dev, "No preferred mode found\n"); 7273 } else if (aconnector) { 7274 recalculate_timing = amdgpu_freesync_vid_mode && 7275 is_freesync_video_mode(&mode, aconnector); 7276 if (recalculate_timing) { 7277 freesync_mode = get_highest_refresh_rate_mode(aconnector, false); 7278 drm_mode_copy(&saved_mode, &mode); 7279 saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio; 7280 drm_mode_copy(&mode, freesync_mode); 7281 mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio; 7282 } else { 7283 decide_crtc_timing_for_drm_display_mode( 7284 &mode, preferred_mode, scale); 7285 7286 preferred_refresh = drm_mode_vrefresh(preferred_mode); 7287 } 7288 } 7289 7290 if (recalculate_timing) 7291 drm_mode_set_crtcinfo(&saved_mode, 0); 7292
7293 /* 7294 * If scaling is enabled and the refresh rate didn't change, 7295 * we copy the VIC and polarities of the old timings. 7296 */ 7297 if (!scale || mode_refresh != preferred_refresh) 7298 fill_stream_properties_from_drm_display_mode( 7299 stream, &mode, connector, con_state, NULL, 7300 requested_bpc); 7301 else 7302 fill_stream_properties_from_drm_display_mode( 7303 stream, &mode, connector, con_state, old_stream, 7304 requested_bpc); 7305 7306 /* The rest isn't needed for writeback connectors */ 7307 if (!aconnector) 7308 goto finish; 7309 7310 if (aconnector->timing_changed) { 7311 drm_dbg(aconnector->base.dev, 7312 "overriding timing for automated test, bpc %d, changing to %d\n", 7313 stream->timing.display_color_depth, 7314 aconnector->timing_requested->display_color_depth); 7315 stream->timing = *aconnector->timing_requested; 7316 } 7317
7318 #if defined(CONFIG_DRM_AMD_DC_FP) 7319 /* SST DSC determination policy */ 7320 update_dsc_caps(aconnector, sink, stream, &dsc_caps); 7321 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) 7322 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps); 7323 #endif 7324 7325 update_stream_scaling_settings(dev, &mode, dm_state, stream); 7326 7327 fill_audio_info( 7328 &stream->audio_info, 7329 connector, 7330 sink); 7331 7332 update_stream_signal(stream, sink); 7333 7334 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 7335 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); 7336 7337 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || 7338 stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || 7339 stream->signal == SIGNAL_TYPE_EDP) { 7340
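/* Check panel-specific EDID quirks that request disabling VSC SDP colorimetry. */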
const struct dc_edid_caps *edid_caps; 7341 unsigned int disable_colorimetry = 0; 7342 7343 if (aconnector->dc_sink) { 7344 edid_caps = &aconnector->dc_sink->edid_caps; 7345 disable_colorimetry = edid_caps->panel_patch.disable_colorimetry; 7346 } 7347
7348 /* 7349 * Decide whether the stream supports VSC SDP colorimetry 7350 * before building the VSC infopacket. 7351 */ 7352 stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && 7353 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && 7354 !disable_colorimetry; 7355 7356 if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) 7357 tf = TRANSFER_FUNC_GAMMA_22; 7358 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); 7359 aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; 7360 7361 } 7362 finish: 7363 dc_sink_release(sink); 7364 7365 return stream; 7366 } 7367
7368 /** 7369 * amdgpu_dm_connector_poll - Poll a connector to see if it's connected to a display 7370 * @aconnector: DM connector to poll (owns @base drm_connector and @dc_link) 7371 * @force: if true, force polling even when DAC load detection was used 7372 * 7373 * Used for connectors that don't support HPD (hotplug detection) to 7374 * periodically check whether the connector is connected to a display. 7375 * 7376 * When the connection was determined via DAC load detection, we avoid 7377 * re-running it on normal polls to prevent visible glitches, unless 7378 * @force is set. 7379 * 7380 * Return: The probed connector status (connected/disconnected/unknown). 7381 */ 7382 static enum drm_connector_status 7383 amdgpu_dm_connector_poll(struct amdgpu_dm_connector *aconnector, bool force) 7384 { 7385 struct drm_connector *connector = &aconnector->base; 7386 struct drm_device *dev = connector->dev; 7387 struct amdgpu_device *adev = drm_to_adev(dev); 7388 struct dc_link *link = aconnector->dc_link; 7389 enum dc_connection_type conn_type = dc_connection_none; 7390 enum drm_connector_status status = connector_status_disconnected; 7391
7392 /* When we determined the connection using DAC load detection, 7393 * do NOT poll the connector to detect disconnect because 7394 * that would run DAC load detection again, which can cause 7395 * visible glitches. 7396 * 7397 * Only allow polling such a connector again when forcing. 7398 */ 7399 if (!force && link->local_sink && link->type == dc_connection_analog_load) 7400 return connector->status; 7401 7402 mutex_lock(&aconnector->hpd_lock); 7403 7404 if (dc_link_detect_connection_type(aconnector->dc_link, &conn_type) && 7405 conn_type != dc_connection_none) { 7406 mutex_lock(&adev->dm.dc_lock); 7407 7408 /* Only call full link detection when a sink isn't created yet, 7409 * i.e. just when the display is plugged in; otherwise we risk flickering.
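 * If a local sink already exists, the connection type check above is enough to report the connector as connected.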
7410 */ 7411 if (link->local_sink || 7412 dc_link_detect(link, DETECT_REASON_HPD)) 7413 status = connector_status_connected; 7414 7415 mutex_unlock(&adev->dm.dc_lock); 7416 } 7417 7418 if (connector->status != status) { 7419 if (status == connector_status_disconnected) { 7420 if (link->local_sink) 7421 dc_sink_release(link->local_sink); 7422 7423 link->local_sink = NULL; 7424 link->dpcd_sink_count = 0; 7425 link->type = dc_connection_none; 7426 } 7427 7428 amdgpu_dm_update_connector_after_detect(aconnector); 7429 } 7430 7431 mutex_unlock(&aconnector->hpd_lock); 7432 return status; 7433 } 7434
7435 /** 7436 * amdgpu_dm_connector_detect() - Detect whether a DRM connector is connected to a display 7437 * 7438 * A connector is considered connected when it has a sink that is not NULL. 7439 * For connectors that support HPD (hotplug detection), the connection is 7440 * handled in the HPD interrupt. 7441 * For connectors that may not support HPD, such as analog connectors, 7442 * DRM will call this function repeatedly to poll them. 7443 * 7444 * Notes: 7445 * 1. This interface is NOT called in the context of the HPD irq. 7446 * 2. This interface *is called* in the context of a user-mode ioctl, which 7447 * makes it a bad place for *any* MST-related activity. 7448 * 7449 * @connector: The DRM connector we are checking. We convert it to an 7450 * amdgpu_dm_connector so we can read the DC link and state. 7451 * @force: If true, do a full detect again, even when a lighter check 7452 * would normally be used to avoid flicker. 7453 * 7454 * Return: The connector status (connected, disconnected, or unknown). 7455 * 7456 */ 7457 static enum drm_connector_status 7458 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) 7459 { 7460 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7461 7462 update_subconnector_property(aconnector); 7463 7464 if (aconnector->base.force == DRM_FORCE_ON || 7465 aconnector->base.force == DRM_FORCE_ON_DIGITAL) 7466 return connector_status_connected; 7467 else if (aconnector->base.force == DRM_FORCE_OFF) 7468 return connector_status_disconnected; 7469
7470 /* Poll only connectors that support analog, and only while they are 7471 * either disconnected or connected to an analog display. 7472 */ 7473 if (drm_kms_helper_is_poll_worker() && 7474 dc_connector_supports_analog(aconnector->dc_link->link_id.id) && 7475 (!aconnector->dc_sink || aconnector->dc_sink->edid_caps.analog)) 7476 return amdgpu_dm_connector_poll(aconnector, force); 7477 7478 return (aconnector->dc_sink ?
connector_status_connected : 7479 connector_status_disconnected); 7480 } 7481 7482 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, 7483 struct drm_connector_state *connector_state, 7484 struct drm_property *property, 7485 uint64_t val) 7486 { 7487 struct drm_device *dev = connector->dev; 7488 struct amdgpu_device *adev = drm_to_adev(dev); 7489 struct dm_connector_state *dm_old_state = 7490 to_dm_connector_state(connector->state); 7491 struct dm_connector_state *dm_new_state = 7492 to_dm_connector_state(connector_state); 7493 7494 int ret = -EINVAL; 7495 7496 if (property == dev->mode_config.scaling_mode_property) { 7497 enum amdgpu_rmx_type rmx_type; 7498 7499 switch (val) { 7500 case DRM_MODE_SCALE_CENTER: 7501 rmx_type = RMX_CENTER; 7502 break; 7503 case DRM_MODE_SCALE_ASPECT: 7504 rmx_type = RMX_ASPECT; 7505 break; 7506 case DRM_MODE_SCALE_FULLSCREEN: 7507 rmx_type = RMX_FULL; 7508 break; 7509 case DRM_MODE_SCALE_NONE: 7510 default: 7511 rmx_type = RMX_OFF; 7512 break; 7513 } 7514 7515 if (dm_old_state->scaling == rmx_type) 7516 return 0; 7517 7518 dm_new_state->scaling = rmx_type; 7519 ret = 0; 7520 } else if (property == adev->mode_info.underscan_hborder_property) { 7521 dm_new_state->underscan_hborder = val; 7522 ret = 0; 7523 } else if (property == adev->mode_info.underscan_vborder_property) { 7524 dm_new_state->underscan_vborder = val; 7525 ret = 0; 7526 } else if (property == adev->mode_info.underscan_property) { 7527 dm_new_state->underscan_enable = val; 7528 ret = 0; 7529 } else if (property == adev->mode_info.abm_level_property) { 7530 switch (val) { 7531 case ABM_SYSFS_CONTROL: 7532 dm_new_state->abm_sysfs_forbidden = false; 7533 break; 7534 case ABM_LEVEL_OFF: 7535 dm_new_state->abm_sysfs_forbidden = true; 7536 dm_new_state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE; 7537 break; 7538 default: 7539 dm_new_state->abm_sysfs_forbidden = true; 7540 dm_new_state->abm_level = val; 7541 } 7542 ret = 0; 7543 } 7544 7545 return ret; 7546 } 7547 7548 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, 7549 const struct drm_connector_state *state, 7550 struct drm_property *property, 7551 uint64_t *val) 7552 { 7553 struct drm_device *dev = connector->dev; 7554 struct amdgpu_device *adev = drm_to_adev(dev); 7555 struct dm_connector_state *dm_state = 7556 to_dm_connector_state(state); 7557 int ret = -EINVAL; 7558 7559 if (property == dev->mode_config.scaling_mode_property) { 7560 switch (dm_state->scaling) { 7561 case RMX_CENTER: 7562 *val = DRM_MODE_SCALE_CENTER; 7563 break; 7564 case RMX_ASPECT: 7565 *val = DRM_MODE_SCALE_ASPECT; 7566 break; 7567 case RMX_FULL: 7568 *val = DRM_MODE_SCALE_FULLSCREEN; 7569 break; 7570 case RMX_OFF: 7571 default: 7572 *val = DRM_MODE_SCALE_NONE; 7573 break; 7574 } 7575 ret = 0; 7576 } else if (property == adev->mode_info.underscan_hborder_property) { 7577 *val = dm_state->underscan_hborder; 7578 ret = 0; 7579 } else if (property == adev->mode_info.underscan_vborder_property) { 7580 *val = dm_state->underscan_vborder; 7581 ret = 0; 7582 } else if (property == adev->mode_info.underscan_property) { 7583 *val = dm_state->underscan_enable; 7584 ret = 0; 7585 } else if (property == adev->mode_info.abm_level_property) { 7586 if (!dm_state->abm_sysfs_forbidden) 7587 *val = ABM_SYSFS_CONTROL; 7588 else 7589 *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ? 
7590 dm_state->abm_level : 0; 7591 ret = 0; 7592 } 7593 7594 return ret; 7595 } 7596 7597 /** 7598 * DOC: panel power savings 7599 * 7600 * The display manager allows you to set your desired **panel power savings** 7601 * level (between 0-4, with 0 representing off), e.g. using the following:: 7602 * 7603 * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings 7604 * 7605 * Modifying this value can have implications on color accuracy, so tread 7606 * carefully. 7607 */ 7608 7609 static ssize_t panel_power_savings_show(struct device *device, 7610 struct device_attribute *attr, 7611 char *buf) 7612 { 7613 struct drm_connector *connector = dev_get_drvdata(device); 7614 struct drm_device *dev = connector->dev; 7615 u8 val; 7616 7617 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 7618 val = to_dm_connector_state(connector->state)->abm_level == 7619 ABM_LEVEL_IMMEDIATE_DISABLE ? 0 : 7620 to_dm_connector_state(connector->state)->abm_level; 7621 drm_modeset_unlock(&dev->mode_config.connection_mutex); 7622 7623 return sysfs_emit(buf, "%u\n", val); 7624 } 7625 7626 static ssize_t panel_power_savings_store(struct device *device, 7627 struct device_attribute *attr, 7628 const char *buf, size_t count) 7629 { 7630 struct drm_connector *connector = dev_get_drvdata(device); 7631 struct drm_device *dev = connector->dev; 7632 long val; 7633 int ret; 7634 7635 ret = kstrtol(buf, 0, &val); 7636 7637 if (ret) 7638 return ret; 7639 7640 if (val < 0 || val > 4) 7641 return -EINVAL; 7642 7643 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 7644 if (to_dm_connector_state(connector->state)->abm_sysfs_forbidden) 7645 ret = -EBUSY; 7646 else 7647 to_dm_connector_state(connector->state)->abm_level = val ?: 7648 ABM_LEVEL_IMMEDIATE_DISABLE; 7649 drm_modeset_unlock(&dev->mode_config.connection_mutex); 7650 7651 if (ret) 7652 return ret; 7653 7654 drm_kms_helper_hotplug_event(dev); 7655 7656 return count; 7657 } 7658 7659 static DEVICE_ATTR_RW(panel_power_savings); 7660 7661 static struct attribute *amdgpu_attrs[] = { 7662 &dev_attr_panel_power_savings.attr, 7663 NULL 7664 }; 7665 7666 static const struct attribute_group amdgpu_group = { 7667 .name = "amdgpu", 7668 .attrs = amdgpu_attrs 7669 }; 7670 7671 static bool 7672 amdgpu_dm_should_create_sysfs(struct amdgpu_dm_connector *amdgpu_dm_connector) 7673 { 7674 if (amdgpu_dm_abm_level >= 0) 7675 return false; 7676 7677 if (amdgpu_dm_connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) 7678 return false; 7679 7680 /* check for OLED panels */ 7681 if (amdgpu_dm_connector->bl_idx >= 0) { 7682 struct drm_device *drm = amdgpu_dm_connector->base.dev; 7683 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; 7684 struct amdgpu_dm_backlight_caps *caps; 7685 7686 caps = &dm->backlight_caps[amdgpu_dm_connector->bl_idx]; 7687 if (caps->aux_support) 7688 return false; 7689 } 7690 7691 return true; 7692 } 7693 7694 static void amdgpu_dm_connector_unregister(struct drm_connector *connector) 7695 { 7696 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 7697 7698 if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) 7699 sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group); 7700 7701 cec_notifier_conn_unregister(amdgpu_dm_connector->notifier); 7702 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); 7703 } 7704 7705 static void amdgpu_dm_connector_destroy(struct drm_connector *connector) 7706 { 7707 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7708 struct 
amdgpu_device *adev = drm_to_adev(connector->dev); 7709 struct amdgpu_display_manager *dm = &adev->dm; 7710 7711 /* 7712 * Call only if mst_mgr was initialized before since it's not done 7713 * for all connector types. 7714 */ 7715 if (aconnector->mst_mgr.dev) 7716 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); 7717 7718 /* Cancel and flush any pending HDMI HPD debounce work */ 7719 if (aconnector->hdmi_hpd_debounce_delay_ms) { 7720 cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work); 7721 if (aconnector->hdmi_prev_sink) { 7722 dc_sink_release(aconnector->hdmi_prev_sink); 7723 aconnector->hdmi_prev_sink = NULL; 7724 } 7725 } 7726 7727 if (aconnector->bl_idx != -1) { 7728 backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]); 7729 dm->backlight_dev[aconnector->bl_idx] = NULL; 7730 } 7731 7732 if (aconnector->dc_em_sink) 7733 dc_sink_release(aconnector->dc_em_sink); 7734 aconnector->dc_em_sink = NULL; 7735 if (aconnector->dc_sink) 7736 dc_sink_release(aconnector->dc_sink); 7737 aconnector->dc_sink = NULL; 7738 7739 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 7740 drm_connector_unregister(connector); 7741 drm_connector_cleanup(connector); 7742 kfree(aconnector->dm_dp_aux.aux.name); 7743 7744 kfree(connector); 7745 } 7746 7747 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) 7748 { 7749 struct dm_connector_state *state = 7750 to_dm_connector_state(connector->state); 7751 7752 if (connector->state) 7753 __drm_atomic_helper_connector_destroy_state(connector->state); 7754 7755 kfree(state); 7756 7757 state = kzalloc_obj(*state); 7758 7759 if (state) { 7760 state->scaling = RMX_OFF; 7761 state->underscan_enable = false; 7762 state->underscan_hborder = 0; 7763 state->underscan_vborder = 0; 7764 state->base.max_requested_bpc = 8; 7765 state->vcpi_slots = 0; 7766 state->pbn = 0; 7767 7768 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 7769 if (amdgpu_dm_abm_level <= 0) 7770 state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE; 7771 else 7772 state->abm_level = amdgpu_dm_abm_level; 7773 } 7774 7775 __drm_atomic_helper_connector_reset(connector, &state->base); 7776 } 7777 } 7778 7779 struct drm_connector_state * 7780 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) 7781 { 7782 struct dm_connector_state *state = 7783 to_dm_connector_state(connector->state); 7784 7785 struct dm_connector_state *new_state = 7786 kmemdup(state, sizeof(*state), GFP_KERNEL); 7787 7788 if (!new_state) 7789 return NULL; 7790 7791 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); 7792 7793 new_state->freesync_capable = state->freesync_capable; 7794 new_state->abm_level = state->abm_level; 7795 new_state->scaling = state->scaling; 7796 new_state->underscan_enable = state->underscan_enable; 7797 new_state->underscan_hborder = state->underscan_hborder; 7798 new_state->underscan_vborder = state->underscan_vborder; 7799 new_state->vcpi_slots = state->vcpi_slots; 7800 new_state->pbn = state->pbn; 7801 return &new_state->base; 7802 } 7803 7804 static int 7805 amdgpu_dm_connector_late_register(struct drm_connector *connector) 7806 { 7807 struct amdgpu_dm_connector *amdgpu_dm_connector = 7808 to_amdgpu_dm_connector(connector); 7809 int r; 7810 7811 if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) { 7812 r = sysfs_create_group(&connector->kdev->kobj, 7813 &amdgpu_group); 7814 if (r) 7815 return r; 7816 } 7817 7818 amdgpu_dm_register_backlight_device(amdgpu_dm_connector); 7819 7820 if 
((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 7821 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 7822 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; 7823 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); 7824 if (r) 7825 return r; 7826 } 7827 7828 #if defined(CONFIG_DEBUG_FS) 7829 connector_debugfs_init(amdgpu_dm_connector); 7830 #endif 7831 7832 return 0; 7833 } 7834 7835 static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) 7836 { 7837 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7838 struct dc_link *dc_link = aconnector->dc_link; 7839 struct dc_sink *dc_em_sink = aconnector->dc_em_sink; 7840 const struct drm_edid *drm_edid; 7841 struct i2c_adapter *ddc; 7842 struct drm_device *dev = connector->dev; 7843 7844 if (dc_link && dc_link->aux_mode) 7845 ddc = &aconnector->dm_dp_aux.aux.ddc; 7846 else 7847 ddc = &aconnector->i2c->base; 7848 7849 drm_edid = drm_edid_read_ddc(connector, ddc); 7850 drm_edid_connector_update(connector, drm_edid); 7851 if (!drm_edid) { 7852 drm_err(dev, "No EDID found on connector: %s.\n", connector->name); 7853 return; 7854 } 7855 7856 aconnector->drm_edid = drm_edid; 7857 /* Update emulated (virtual) sink's EDID */ 7858 if (dc_em_sink && dc_link) { 7859 // FIXME: Get rid of drm_edid_raw() 7860 const struct edid *edid = drm_edid_raw(drm_edid); 7861 7862 memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); 7863 memmove(dc_em_sink->dc_edid.raw_edid, edid, 7864 (edid->extensions + 1) * EDID_LENGTH); 7865 dm_helpers_parse_edid_caps( 7866 dc_link, 7867 &dc_em_sink->dc_edid, 7868 &dc_em_sink->edid_caps); 7869 } 7870 } 7871 7872 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { 7873 .reset = amdgpu_dm_connector_funcs_reset, 7874 .detect = amdgpu_dm_connector_detect, 7875 .fill_modes = drm_helper_probe_single_connector_modes, 7876 .destroy = amdgpu_dm_connector_destroy, 7877 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 7878 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 7879 .atomic_set_property = amdgpu_dm_connector_atomic_set_property, 7880 .atomic_get_property = amdgpu_dm_connector_atomic_get_property, 7881 .late_register = amdgpu_dm_connector_late_register, 7882 .early_unregister = amdgpu_dm_connector_unregister, 7883 .force = amdgpu_dm_connector_funcs_force 7884 }; 7885 7886 static int get_modes(struct drm_connector *connector) 7887 { 7888 return amdgpu_dm_connector_get_modes(connector); 7889 } 7890 7891 static void create_eml_sink(struct amdgpu_dm_connector *aconnector) 7892 { 7893 struct drm_connector *connector = &aconnector->base; 7894 struct dc_link *dc_link = aconnector->dc_link; 7895 struct dc_sink_init_data init_params = { 7896 .link = aconnector->dc_link, 7897 .sink_signal = SIGNAL_TYPE_VIRTUAL 7898 }; 7899 const struct drm_edid *drm_edid; 7900 const struct edid *edid; 7901 struct i2c_adapter *ddc; 7902 7903 if (dc_link && dc_link->aux_mode) 7904 ddc = &aconnector->dm_dp_aux.aux.ddc; 7905 else 7906 ddc = &aconnector->i2c->base; 7907 7908 drm_edid = drm_edid_read_ddc(connector, ddc); 7909 drm_edid_connector_update(connector, drm_edid); 7910 if (!drm_edid) { 7911 drm_err(connector->dev, "No EDID found on connector: %s.\n", connector->name); 7912 return; 7913 } 7914 7915 if (connector->display_info.is_hdmi) 7916 init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; 7917 7918 aconnector->drm_edid = drm_edid; 7919 7920 edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() 
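/* Register the emulated sink with DC; (edid->extensions + 1) * EDID_LENGTH covers the EDID base block plus all extension blocks. */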
7921 aconnector->dc_em_sink = dc_link_add_remote_sink( 7922 aconnector->dc_link, 7923 (uint8_t *)edid, 7924 (edid->extensions + 1) * EDID_LENGTH, 7925 &init_params); 7926 7927 if (aconnector->base.force == DRM_FORCE_ON) { 7928 aconnector->dc_sink = aconnector->dc_link->local_sink ? 7929 aconnector->dc_link->local_sink : 7930 aconnector->dc_em_sink; 7931 if (aconnector->dc_sink) 7932 dc_sink_retain(aconnector->dc_sink); 7933 } 7934 } 7935
7936 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) 7937 { 7938 struct dc_link *link = (struct dc_link *)aconnector->dc_link; 7939 7940 /* 7941 * In case of a headless boot with force-on for a DP managed connector, 7942 * those settings have to be != 0 to get an initial modeset. 7943 */ 7944 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { 7945 link->verified_link_cap.lane_count = LANE_COUNT_FOUR; 7946 link->verified_link_cap.link_rate = LINK_RATE_HIGH2; 7947 } 7948 7949 create_eml_sink(aconnector); 7950 } 7951
7952 static enum dc_status dm_validate_stream_and_context(struct dc *dc, 7953 struct dc_stream_state *stream) 7954 { 7955 enum dc_status dc_result = DC_ERROR_UNEXPECTED; 7956 struct dc_plane_state *dc_plane_state = NULL; 7957 struct dc_state *dc_state = NULL; 7958 7959 if (!stream) 7960 goto cleanup; 7961 7962 dc_plane_state = dc_create_plane_state(dc); 7963 if (!dc_plane_state) 7964 goto cleanup; 7965 7966 dc_state = dc_state_create(dc, NULL); 7967 if (!dc_state) 7968 goto cleanup; 7969
7970 /* populate the plane from the stream's source dimensions */ 7971 dc_plane_state->src_rect.height = stream->src.height; 7972 dc_plane_state->src_rect.width = stream->src.width; 7973 dc_plane_state->dst_rect.height = stream->src.height; 7974 dc_plane_state->dst_rect.width = stream->src.width; 7975 dc_plane_state->clip_rect.height = stream->src.height; 7976 dc_plane_state->clip_rect.width = stream->src.width; 7977 dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256; 7978 dc_plane_state->plane_size.surface_size.height = stream->src.height; 7979 dc_plane_state->plane_size.surface_size.width = stream->src.width; 7980 dc_plane_state->plane_size.chroma_size.height = stream->src.height; 7981 dc_plane_state->plane_size.chroma_size.width = stream->src.width; 7982 dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 7983 dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN; 7984 dc_plane_state->rotation = ROTATION_ANGLE_0; 7985 dc_plane_state->is_tiling_rotated = false; 7986 dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL; 7987
7988 dc_result = dc_validate_stream(dc, stream); 7989 if (dc_result == DC_OK) 7990 dc_result = dc_validate_plane(dc, dc_plane_state); 7991 7992 if (dc_result == DC_OK) 7993 dc_result = dc_state_add_stream(dc, dc_state, stream); 7994 7995 if (dc_result == DC_OK && !dc_state_add_plane( 7996 dc, 7997 stream, 7998 dc_plane_state, 7999 dc_state)) 8000 dc_result = DC_FAIL_ATTACH_SURFACES; 8001 8002 if (dc_result == DC_OK) 8003 dc_result = dc_validate_global_state(dc, dc_state, DC_VALIDATE_MODE_ONLY); 8004 8005 cleanup: 8006 if (dc_state) 8007 dc_state_release(dc_state); 8008 8009 if (dc_plane_state) 8010 dc_plane_state_release(dc_plane_state); 8011 8012 return dc_result; 8013 } 8014
8015 struct dc_stream_state * 8016 create_validate_stream_for_sink(struct drm_connector *connector, 8017 const struct drm_display_mode *drm_mode, 8018 const struct dm_connector_state *dm_state, 8019 const struct dc_stream_state *old_stream) 8020 { 8021 struct amdgpu_dm_connector *aconnector = NULL; 8022 struct
amdgpu_device *adev = drm_to_adev(connector->dev); 8023 struct dc_stream_state *stream; 8024 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; 8025 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8; 8026 enum dc_status dc_result = DC_OK; 8027 uint8_t bpc_limit = 6; 8028 8029 if (!dm_state) 8030 return NULL; 8031 8032 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 8033 aconnector = to_amdgpu_dm_connector(connector); 8034 8035 if (aconnector && 8036 (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A || 8037 aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)) 8038 bpc_limit = 8; 8039
8040 do { 8041 drm_dbg_kms(connector->dev, "Trying with %d bpc\n", requested_bpc); 8042 stream = create_stream_for_sink(connector, drm_mode, 8043 dm_state, old_stream, 8044 requested_bpc); 8045 if (stream == NULL) { 8046 drm_err(adev_to_drm(adev), "Failed to create stream for sink!\n"); 8047 break; 8048 } 8049 8050 dc_result = dc_validate_stream(adev->dm.dc, stream); 8051 8052 if (!aconnector) /* writeback connector */ 8053 return stream; 8054 8055 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 8056 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream); 8057 8058 if (dc_result == DC_OK) 8059 dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); 8060 8061 if (dc_result != DC_OK) { 8062 drm_dbg_kms(connector->dev, "Pruned mode %d x %d (clk %d) %s %s -- %s\n", 8063 drm_mode->hdisplay, 8064 drm_mode->vdisplay, 8065 drm_mode->clock, 8066 dc_pixel_encoding_to_str(stream->timing.pixel_encoding), 8067 dc_color_depth_to_str(stream->timing.display_color_depth), 8068 dc_status_to_str(dc_result)); 8069 8070 dc_stream_release(stream); 8071 stream = NULL; 8072 requested_bpc -= 2; /* lower bpc to retry validation */ 8073 } 8074 8075 } while (stream == NULL && requested_bpc >= bpc_limit); 8076
8077 switch (dc_result) { 8078 /* 8079 * If we failed to validate the stream against DP bandwidth with the 8080 * requested RGB color depth, we try to fall back and configure, in order: 8081 * YUV422 (8bpc, 6bpc) 8082 * YUV420 (8bpc, 6bpc) 8083 */ 8084 case DC_FAIL_ENC_VALIDATE: 8085 case DC_EXCEED_DONGLE_CAP: 8086 case DC_NO_DP_LINK_BANDWIDTH: 8087 /* recursively entered twice and already tried both YUV422 and YUV420 */ 8088 if (aconnector->force_yuv422_output && aconnector->force_yuv420_output) 8089 break; 8090 /* first failure; try YUV422 */ 8091 if (!aconnector->force_yuv422_output) { 8092 drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV422\n", 8093 __func__, __LINE__, dc_result); 8094 aconnector->force_yuv422_output = true; 8095 /* recursively entered and YUV422 failed, try YUV420 */ 8096 } else if (!aconnector->force_yuv420_output) { 8097 drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV420\n", 8098 __func__, __LINE__, dc_result); 8099 aconnector->force_yuv420_output = true; 8100 } 8101 stream = create_validate_stream_for_sink(connector, drm_mode, 8102 dm_state, old_stream); 8103 aconnector->force_yuv422_output = false; 8104 aconnector->force_yuv420_output = false; 8105 break; 8106 case DC_OK: 8107 break; 8108 default: 8109 drm_dbg_kms(connector->dev, "%s:%d Unhandled validation failure %d\n", 8110 __func__, __LINE__, dc_result); 8111 break; 8112 } 8113 8114 return stream; 8115 } 8116
8117 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, 8118 const struct drm_display_mode *mode) 8119 { 8120 int result =
MODE_ERROR; 8121 struct dc_sink *dc_sink; 8122 struct drm_display_mode *test_mode; 8123 /* TODO: Unhardcode stream count */ 8124 struct dc_stream_state *stream; 8125 /* We always have an amdgpu_dm_connector here since we got 8126 * here via the amdgpu_dm_connector_helper_funcs 8127 */ 8128 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8129 8130 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 8131 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) 8132 return result; 8133
8134 /* 8135 * Only run this the first time mode_valid is called to initialize 8136 * EDID mgmt 8137 */ 8138 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && 8139 !aconnector->dc_em_sink) 8140 handle_edid_mgmt(aconnector); 8141 8142 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink; 8143 8144 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL && 8145 aconnector->base.force != DRM_FORCE_ON) { 8146 drm_err(connector->dev, "dc_sink is NULL!\n"); 8147 goto fail; 8148 } 8149 8150 test_mode = drm_mode_duplicate(connector->dev, mode); 8151 if (!test_mode) 8152 goto fail; 8153 8154 drm_mode_set_crtcinfo(test_mode, 0); 8155 8156 stream = create_validate_stream_for_sink(connector, test_mode, 8157 to_dm_connector_state(connector->state), 8158 NULL); 8159 drm_mode_destroy(connector->dev, test_mode); 8160 if (stream) { 8161 dc_stream_release(stream); 8162 result = MODE_OK; 8163 } 8164 8165 fail: 8166 /* TODO: error handling */ 8167 return result; 8168 } 8169
8170 static int fill_hdr_info_packet(const struct drm_connector_state *state, 8171 struct dc_info_packet *out) 8172 { 8173 struct hdmi_drm_infoframe frame; 8174 unsigned char buf[30]; /* 26 + 4 */ 8175 ssize_t len; 8176 int ret, i; 8177 8178 memset(out, 0, sizeof(*out)); 8179 8180 if (!state->hdr_output_metadata) 8181 return 0; 8182 8183 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state); 8184 if (ret) 8185 return ret; 8186 8187 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf)); 8188 if (len < 0) 8189 return (int)len; 8190 8191 /* Static metadata is a fixed 26 bytes + 4 byte header. */ 8192 if (len != 30) 8193 return -EINVAL; 8194 8195 /* Prepare the infopacket for DC.
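 * The 4-byte header differs by transport: HDMI uses the HDR Dynamic Range and Mastering InfoFrame header, while DP/eDP wrap the same 26-byte payload in an SDP header.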
*/ 8196 switch (state->connector->connector_type) { 8197 case DRM_MODE_CONNECTOR_HDMIA: 8198 out->hb0 = 0x87; /* type */ 8199 out->hb1 = 0x01; /* version */ 8200 out->hb2 = 0x1A; /* length */ 8201 out->sb[0] = buf[3]; /* checksum */ 8202 i = 1; 8203 break; 8204 8205 case DRM_MODE_CONNECTOR_DisplayPort: 8206 case DRM_MODE_CONNECTOR_eDP: 8207 out->hb0 = 0x00; /* sdp id, zero */ 8208 out->hb1 = 0x87; /* type */ 8209 out->hb2 = 0x1D; /* payload len - 1 */ 8210 out->hb3 = (0x13 << 2); /* sdp version */ 8211 out->sb[0] = 0x01; /* version */ 8212 out->sb[1] = 0x1A; /* length */ 8213 i = 2; 8214 break; 8215 8216 default: 8217 return -EINVAL; 8218 } 8219 8220 memcpy(&out->sb[i], &buf[4], 26); 8221 out->valid = true; 8222 8223 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, 8224 sizeof(out->sb), false); 8225 8226 return 0; 8227 } 8228 8229 static int 8230 amdgpu_dm_connector_atomic_check(struct drm_connector *conn, 8231 struct drm_atomic_state *state) 8232 { 8233 struct drm_connector_state *new_con_state = 8234 drm_atomic_get_new_connector_state(state, conn); 8235 struct drm_connector_state *old_con_state = 8236 drm_atomic_get_old_connector_state(state, conn); 8237 struct drm_crtc *crtc = new_con_state->crtc; 8238 struct drm_crtc_state *new_crtc_state; 8239 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); 8240 int ret; 8241 8242 if (WARN_ON(unlikely(!old_con_state || !new_con_state))) 8243 return -EINVAL; 8244 8245 trace_amdgpu_dm_connector_atomic_check(new_con_state); 8246 8247 if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 8248 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr); 8249 if (ret < 0) 8250 return ret; 8251 } 8252 8253 if (!crtc) 8254 return 0; 8255 8256 if (new_con_state->privacy_screen_sw_state != old_con_state->privacy_screen_sw_state) { 8257 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 8258 if (IS_ERR(new_crtc_state)) 8259 return PTR_ERR(new_crtc_state); 8260 8261 new_crtc_state->mode_changed = true; 8262 } 8263 8264 if (new_con_state->colorspace != old_con_state->colorspace) { 8265 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 8266 if (IS_ERR(new_crtc_state)) 8267 return PTR_ERR(new_crtc_state); 8268 8269 new_crtc_state->mode_changed = true; 8270 } 8271 8272 if (new_con_state->content_type != old_con_state->content_type) { 8273 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 8274 if (IS_ERR(new_crtc_state)) 8275 return PTR_ERR(new_crtc_state); 8276 8277 new_crtc_state->mode_changed = true; 8278 } 8279 8280 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { 8281 struct dc_info_packet hdr_infopacket; 8282 8283 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); 8284 if (ret) 8285 return ret; 8286 8287 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 8288 if (IS_ERR(new_crtc_state)) 8289 return PTR_ERR(new_crtc_state); 8290 8291 /* 8292 * DC considers the stream backends changed if the 8293 * static metadata changes. Forcing the modeset also 8294 * gives a simple way for userspace to switch from 8295 * 8bpc to 10bpc when setting the metadata to enter 8296 * or exit HDR. 8297 * 8298 * Changing the static metadata after it's been 8299 * set is permissible, however. So only force a 8300 * modeset if we're entering or exiting HDR. 
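 * In other words, only force a modeset when hdr_output_metadata transitions between NULL and non-NULL.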
8301 */ 8302 new_crtc_state->mode_changed = new_crtc_state->mode_changed || 8303 !old_con_state->hdr_output_metadata || 8304 !new_con_state->hdr_output_metadata; 8305 } 8306 8307 return 0; 8308 } 8309
8310 static const struct drm_connector_helper_funcs 8311 amdgpu_dm_connector_helper_funcs = { 8312 /* 8313 * If a second, larger display is hotplugged in FB console mode, the 8314 * larger modes are filtered out by drm_mode_validate_size() and end up 8315 * missing once the user starts lightdm. So we need to rebuild the mode 8316 * list in the get_modes callback, not just return the mode count. 8317 */ 8318 .get_modes = get_modes, 8319 .mode_valid = amdgpu_dm_connector_mode_valid, 8320 .atomic_check = amdgpu_dm_connector_atomic_check, 8321 }; 8322
8323 static void dm_encoder_helper_disable(struct drm_encoder *encoder) 8324 { 8325 8326 } 8327 8328 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth) 8329 { 8330 switch (display_color_depth) { 8331 case COLOR_DEPTH_666: 8332 return 6; 8333 case COLOR_DEPTH_888: 8334 return 8; 8335 case COLOR_DEPTH_101010: 8336 return 10; 8337 case COLOR_DEPTH_121212: 8338 return 12; 8339 case COLOR_DEPTH_141414: 8340 return 14; 8341 case COLOR_DEPTH_161616: 8342 return 16; 8343 default: 8344 break; 8345 } 8346 return 0; 8347 } 8348
8349 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, 8350 struct drm_crtc_state *crtc_state, 8351 struct drm_connector_state *conn_state) 8352 { 8353 struct drm_atomic_state *state = crtc_state->state; 8354 struct drm_connector *connector = conn_state->connector; 8355 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8356 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state); 8357 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; 8358 struct drm_dp_mst_topology_mgr *mst_mgr; 8359 struct drm_dp_mst_port *mst_port; 8360 struct drm_dp_mst_topology_state *mst_state; 8361 enum dc_color_depth color_depth; 8362 int clock, bpp = 0; 8363 bool is_y420 = false; 8364 8365 if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || 8366 (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { 8367 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 8368 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 8369 enum drm_mode_status result; 8370 8371 result = drm_crtc_helper_mode_valid_fixed(encoder->crtc, adjusted_mode, native_mode); 8372 if (result != MODE_OK && dm_new_connector_state->scaling == RMX_OFF) { 8373 drm_dbg_driver(encoder->dev, 8374 "mode %dx%d@%dHz is not native, enabling scaling\n", 8375 adjusted_mode->hdisplay, adjusted_mode->vdisplay, 8376 drm_mode_vrefresh(adjusted_mode)); 8377 dm_new_connector_state->scaling = RMX_ASPECT; 8378 } 8379 return 0; 8380 } 8381 8382 if (!aconnector->mst_output_port) 8383 return 0; 8384 8385 mst_port = aconnector->mst_output_port; 8386 mst_mgr = &aconnector->mst_root->mst_mgr; 8387 8388 if (!crtc_state->connectors_changed && !crtc_state->mode_changed) 8389 return 0; 8390 8391 mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr); 8392 if (IS_ERR(mst_state)) 8393 return PTR_ERR(mst_state); 8394 8395 mst_state->pbn_div.full = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link); 8396 8397 if (!state->duplicated) { 8398 int max_bpc = conn_state->max_requested_bpc; 8399 8400 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && 8401 aconnector->force_yuv420_output; 8402 color_depth =
convert_color_depth_from_display_info(connector, 8403 is_y420, 8404 max_bpc); 8405 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; 8406 clock = adjusted_mode->clock; 8407 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4); 8408 } 8409 8410 dm_new_connector_state->vcpi_slots = 8411 drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port, 8412 dm_new_connector_state->pbn); 8413 if (dm_new_connector_state->vcpi_slots < 0) { 8414 drm_dbg_atomic(connector->dev, "failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); 8415 return dm_new_connector_state->vcpi_slots; 8416 } 8417 return 0; 8418 } 8419 8420 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { 8421 .disable = dm_encoder_helper_disable, 8422 .atomic_check = dm_encoder_helper_atomic_check 8423 }; 8424 8425 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, 8426 struct dc_state *dc_state, 8427 struct dsc_mst_fairness_vars *vars) 8428 { 8429 struct dc_stream_state *stream = NULL; 8430 struct drm_connector *connector; 8431 struct drm_connector_state *new_con_state; 8432 struct amdgpu_dm_connector *aconnector; 8433 struct dm_connector_state *dm_conn_state; 8434 int i, j, ret; 8435 int vcpi, pbn_div, pbn = 0, slot_num = 0; 8436 8437 for_each_new_connector_in_state(state, connector, new_con_state, i) { 8438 8439 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 8440 continue; 8441 8442 aconnector = to_amdgpu_dm_connector(connector); 8443 8444 if (!aconnector->mst_output_port) 8445 continue; 8446 8447 if (!new_con_state || !new_con_state->crtc) 8448 continue; 8449 8450 dm_conn_state = to_dm_connector_state(new_con_state); 8451 8452 for (j = 0; j < dc_state->stream_count; j++) { 8453 stream = dc_state->streams[j]; 8454 if (!stream) 8455 continue; 8456 8457 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector) 8458 break; 8459 8460 stream = NULL; 8461 } 8462 8463 if (!stream) 8464 continue; 8465 8466 pbn_div = dm_mst_get_pbn_divider(stream->link); 8467 /* pbn is calculated by compute_mst_dsc_configs_for_state*/ 8468 for (j = 0; j < dc_state->stream_count; j++) { 8469 if (vars[j].aconnector == aconnector) { 8470 pbn = vars[j].pbn; 8471 break; 8472 } 8473 } 8474 8475 if (j == dc_state->stream_count || pbn_div == 0) 8476 continue; 8477 8478 slot_num = DIV_ROUND_UP(pbn, pbn_div); 8479 8480 if (stream->timing.flags.DSC != 1) { 8481 dm_conn_state->pbn = pbn; 8482 dm_conn_state->vcpi_slots = slot_num; 8483 8484 ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, 8485 dm_conn_state->pbn, false); 8486 if (ret < 0) 8487 return ret; 8488 8489 continue; 8490 } 8491 8492 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true); 8493 if (vcpi < 0) 8494 return vcpi; 8495 8496 dm_conn_state->pbn = pbn; 8497 dm_conn_state->vcpi_slots = vcpi; 8498 } 8499 return 0; 8500 } 8501 8502 static int to_drm_connector_type(enum signal_type st, uint32_t connector_id) 8503 { 8504 switch (st) { 8505 case SIGNAL_TYPE_HDMI_TYPE_A: 8506 return DRM_MODE_CONNECTOR_HDMIA; 8507 case SIGNAL_TYPE_EDP: 8508 return DRM_MODE_CONNECTOR_eDP; 8509 case SIGNAL_TYPE_LVDS: 8510 return DRM_MODE_CONNECTOR_LVDS; 8511 case SIGNAL_TYPE_RGB: 8512 return DRM_MODE_CONNECTOR_VGA; 8513 case SIGNAL_TYPE_DISPLAY_PORT: 8514 case SIGNAL_TYPE_DISPLAY_PORT_MST: 8515 /* External DP bridges have a different connector type. 
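 * Report the downstream connector type (VGA or LVDS) instead of DisplayPort in that case.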
*/ 8516 if (connector_id == CONNECTOR_ID_VGA) 8517 return DRM_MODE_CONNECTOR_VGA; 8518 else if (connector_id == CONNECTOR_ID_LVDS) 8519 return DRM_MODE_CONNECTOR_LVDS; 8520 8521 return DRM_MODE_CONNECTOR_DisplayPort; 8522 case SIGNAL_TYPE_DVI_DUAL_LINK: 8523 case SIGNAL_TYPE_DVI_SINGLE_LINK: 8524 if (connector_id == CONNECTOR_ID_SINGLE_LINK_DVII || 8525 connector_id == CONNECTOR_ID_DUAL_LINK_DVII) 8526 return DRM_MODE_CONNECTOR_DVII; 8527 8528 return DRM_MODE_CONNECTOR_DVID; 8529 case SIGNAL_TYPE_VIRTUAL: 8530 return DRM_MODE_CONNECTOR_VIRTUAL; 8531 8532 default: 8533 return DRM_MODE_CONNECTOR_Unknown; 8534 } 8535 } 8536 8537 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) 8538 { 8539 struct drm_encoder *encoder; 8540 8541 /* There is only one encoder per connector */ 8542 drm_connector_for_each_possible_encoder(connector, encoder) 8543 return encoder; 8544 8545 return NULL; 8546 } 8547 8548 static void amdgpu_dm_get_native_mode(struct drm_connector *connector) 8549 { 8550 struct drm_encoder *encoder; 8551 struct amdgpu_encoder *amdgpu_encoder; 8552 8553 encoder = amdgpu_dm_connector_to_encoder(connector); 8554 8555 if (encoder == NULL) 8556 return; 8557 8558 amdgpu_encoder = to_amdgpu_encoder(encoder); 8559 8560 amdgpu_encoder->native_mode.clock = 0; 8561 8562 if (!list_empty(&connector->probed_modes)) { 8563 struct drm_display_mode *preferred_mode = NULL; 8564 8565 list_for_each_entry(preferred_mode, 8566 &connector->probed_modes, 8567 head) { 8568 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) 8569 amdgpu_encoder->native_mode = *preferred_mode; 8570 8571 break; 8572 } 8573 8574 } 8575 } 8576 8577 static struct drm_display_mode * 8578 amdgpu_dm_create_common_mode(struct drm_encoder *encoder, 8579 const char *name, 8580 int hdisplay, int vdisplay) 8581 { 8582 struct drm_device *dev = encoder->dev; 8583 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 8584 struct drm_display_mode *mode = NULL; 8585 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 8586 8587 mode = drm_mode_duplicate(dev, native_mode); 8588 8589 if (mode == NULL) 8590 return NULL; 8591 8592 mode->hdisplay = hdisplay; 8593 mode->vdisplay = vdisplay; 8594 mode->type &= ~DRM_MODE_TYPE_PREFERRED; 8595 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); 8596 8597 return mode; 8598 8599 } 8600 8601 static const struct amdgpu_dm_mode_size { 8602 char name[DRM_DISPLAY_MODE_LEN]; 8603 int w; 8604 int h; 8605 } common_modes[] = { 8606 { "640x480", 640, 480}, 8607 { "800x600", 800, 600}, 8608 { "1024x768", 1024, 768}, 8609 { "1280x720", 1280, 720}, 8610 { "1280x800", 1280, 800}, 8611 {"1280x1024", 1280, 1024}, 8612 { "1440x900", 1440, 900}, 8613 {"1680x1050", 1680, 1050}, 8614 {"1600x1200", 1600, 1200}, 8615 {"1920x1080", 1920, 1080}, 8616 {"1920x1200", 1920, 1200} 8617 }; 8618 8619 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, 8620 struct drm_connector *connector) 8621 { 8622 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 8623 struct drm_display_mode *mode = NULL; 8624 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 8625 struct amdgpu_dm_connector *amdgpu_dm_connector = 8626 to_amdgpu_dm_connector(connector); 8627 int i; 8628 int n; 8629 8630 if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) && 8631 (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)) 8632 return; 8633 8634 n = ARRAY_SIZE(common_modes); 8635 8636 for (i = 0; i < n; i++) { 8637 struct drm_display_mode 
*curmode = NULL; 8638 bool mode_existed = false; 8639 8640 if (common_modes[i].w > native_mode->hdisplay || 8641 common_modes[i].h > native_mode->vdisplay || 8642 (common_modes[i].w == native_mode->hdisplay && 8643 common_modes[i].h == native_mode->vdisplay)) 8644 continue; 8645 8646 list_for_each_entry(curmode, &connector->probed_modes, head) { 8647 if (common_modes[i].w == curmode->hdisplay && 8648 common_modes[i].h == curmode->vdisplay) { 8649 mode_existed = true; 8650 break; 8651 } 8652 } 8653 8654 if (mode_existed) 8655 continue; 8656 8657 mode = amdgpu_dm_create_common_mode(encoder, 8658 common_modes[i].name, common_modes[i].w, 8659 common_modes[i].h); 8660 if (!mode) 8661 continue; 8662 8663 drm_mode_probed_add(connector, mode); 8664 amdgpu_dm_connector->num_modes++; 8665 } 8666 } 8667 8668 static void amdgpu_set_panel_orientation(struct drm_connector *connector) 8669 { 8670 struct drm_encoder *encoder; 8671 struct amdgpu_encoder *amdgpu_encoder; 8672 const struct drm_display_mode *native_mode; 8673 8674 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && 8675 connector->connector_type != DRM_MODE_CONNECTOR_LVDS) 8676 return; 8677 8678 mutex_lock(&connector->dev->mode_config.mutex); 8679 amdgpu_dm_connector_get_modes(connector); 8680 mutex_unlock(&connector->dev->mode_config.mutex); 8681 8682 encoder = amdgpu_dm_connector_to_encoder(connector); 8683 if (!encoder) 8684 return; 8685 8686 amdgpu_encoder = to_amdgpu_encoder(encoder); 8687 8688 native_mode = &amdgpu_encoder->native_mode; 8689 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) 8690 return; 8691 8692 drm_connector_set_panel_orientation_with_quirk(connector, 8693 DRM_MODE_PANEL_ORIENTATION_UNKNOWN, 8694 native_mode->hdisplay, 8695 native_mode->vdisplay); 8696 } 8697 8698 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, 8699 const struct drm_edid *drm_edid) 8700 { 8701 struct amdgpu_dm_connector *amdgpu_dm_connector = 8702 to_amdgpu_dm_connector(connector); 8703 8704 if (drm_edid) { 8705 /* empty probed_modes */ 8706 INIT_LIST_HEAD(&connector->probed_modes); 8707 amdgpu_dm_connector->num_modes = 8708 drm_edid_connector_add_modes(connector); 8709 8710 /* sorting the probed modes before calling function 8711 * amdgpu_dm_get_native_mode() since EDID can have 8712 * more than one preferred mode. The modes that are 8713 * later in the probed mode list could be of higher 8714 * and preferred resolution. For example, 3840x2160 8715 * resolution in base EDID preferred timing and 4096x2160 8716 * preferred resolution in DID extension block later. 8717 */ 8718 drm_mode_sort(&connector->probed_modes); 8719 amdgpu_dm_get_native_mode(connector); 8720 8721 /* Freesync capabilities are reset by calling 8722 * drm_edid_connector_add_modes() and need to be 8723 * restored here. 
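 * amdgpu_dm_update_freesync_caps() re-parses the EDID for the supported refresh range.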
8724 */ 8725 amdgpu_dm_update_freesync_caps(connector, drm_edid); 8726 } else { 8727 amdgpu_dm_connector->num_modes = 0; 8728 } 8729 } 8730
8731 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, 8732 struct drm_display_mode *mode) 8733 { 8734 struct drm_display_mode *m; 8735 8736 list_for_each_entry(m, &aconnector->base.probed_modes, head) { 8737 if (drm_mode_equal(m, mode)) 8738 return true; 8739 } 8740 8741 return false; 8742 } 8743
8744 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) 8745 { 8746 const struct drm_display_mode *m; 8747 struct drm_display_mode *new_mode; 8748 uint i; 8749 u32 new_modes_count = 0; 8750 8751 /* Standard FPS values 8752 * 8753 * 23.976 - TV/NTSC 8754 * 24 - Cinema 8755 * 25 - TV/PAL 8756 * 29.97 - TV/NTSC 8757 * 30 - TV/NTSC 8758 * 48 - Cinema HFR 8759 * 50 - TV/PAL 8760 * 60 - Commonly used 8761 * 48,72,96,120 - Multiples of 24 8762 */ 8763 static const u32 common_rates[] = { 8764 23976, 24000, 25000, 29970, 30000, 8765 48000, 50000, 60000, 72000, 96000, 120000 8766 }; 8767
8768 /* 8769 * Find the mode with the highest refresh rate at the same resolution 8770 * as the preferred mode. Some monitors report a preferred mode with a 8771 * lower refresh rate than the highest one they support. 8772 */ 8773 8774 m = get_highest_refresh_rate_mode(aconnector, true); 8775 if (!m) 8776 return 0; 8777 8778 for (i = 0; i < ARRAY_SIZE(common_rates); i++) { 8779 u64 target_vtotal, target_vtotal_diff; 8780 u64 num, den; 8781 8782 if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) 8783 continue; 8784 8785 if (common_rates[i] < aconnector->min_vfreq * 1000 || 8786 common_rates[i] > aconnector->max_vfreq * 1000) 8787 continue; 8788 8789 num = (unsigned long long)m->clock * 1000 * 1000; 8790 den = common_rates[i] * (unsigned long long)m->htotal; 8791 target_vtotal = div_u64(num, den); 8792 target_vtotal_diff = target_vtotal - m->vtotal; 8793 8794 /* Check for illegal modes */ 8795 if (m->vsync_start + target_vtotal_diff < m->vdisplay || 8796 m->vsync_end + target_vtotal_diff < m->vsync_start || 8797 m->vtotal + target_vtotal_diff < m->vsync_end) 8798 continue; 8799 8800 new_mode = drm_mode_duplicate(aconnector->base.dev, m); 8801 if (!new_mode) 8802 goto out; 8803 8804 new_mode->vtotal += (u16)target_vtotal_diff; 8805 new_mode->vsync_start += (u16)target_vtotal_diff; 8806 new_mode->vsync_end += (u16)target_vtotal_diff; 8807 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; 8808 new_mode->type |= DRM_MODE_TYPE_DRIVER; 8809 8810 if (!is_duplicate_mode(aconnector, new_mode)) { 8811 drm_mode_probed_add(&aconnector->base, new_mode); 8812 new_modes_count += 1; 8813 } else 8814 drm_mode_destroy(aconnector->base.dev, new_mode); 8815 } 8816 out: 8817 return new_modes_count; 8818 } 8819
8820 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, 8821 const struct drm_edid *drm_edid) 8822 { 8823 struct amdgpu_dm_connector *amdgpu_dm_connector = 8824 to_amdgpu_dm_connector(connector); 8825 8826 if (!(amdgpu_freesync_vid_mode && drm_edid)) 8827 return; 8828 8829 if (!amdgpu_dm_connector->dc_sink || !amdgpu_dm_connector->dc_link) 8830 return; 8831 8832 if (!dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version)) 8833 return; 8834 8835 if (dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id) && 8836 amdgpu_dm_connector->dc_sink->edid_caps.analog) 8837 return; 8838 8839 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 8840 amdgpu_dm_connector->num_modes += 8841
add_fs_modes(amdgpu_dm_connector); 8842 } 8843 8844 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) 8845 { 8846 struct amdgpu_dm_connector *amdgpu_dm_connector = 8847 to_amdgpu_dm_connector(connector); 8848 struct dc_link *dc_link = amdgpu_dm_connector->dc_link; 8849 struct drm_encoder *encoder; 8850 const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid; 8851 struct dc_link_settings *verified_link_cap = &dc_link->verified_link_cap; 8852 const struct dc *dc = dc_link->dc; 8853 8854 encoder = amdgpu_dm_connector_to_encoder(connector); 8855 8856 if (!drm_edid) { 8857 amdgpu_dm_connector->num_modes = 8858 drm_add_modes_noedid(connector, 640, 480); 8859 if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) 8860 amdgpu_dm_connector->num_modes += 8861 drm_add_modes_noedid(connector, 1920, 1080); 8862 8863 if (amdgpu_dm_connector->dc_sink && 8864 amdgpu_dm_connector->dc_sink->edid_caps.analog && 8865 dc_connector_supports_analog(dc_link->link_id.id)) { 8866 /* Analog monitor connected by DAC load detection. 8867 * Add common modes. It will be up to the user to select one that works. 8868 */ 8869 for (int i = 0; i < ARRAY_SIZE(common_modes); i++) 8870 amdgpu_dm_connector->num_modes += drm_add_modes_noedid( 8871 connector, common_modes[i].w, common_modes[i].h); 8872 } 8873 } else { 8874 amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); 8875 if (encoder) 8876 amdgpu_dm_connector_add_common_modes(encoder, connector); 8877 amdgpu_dm_connector_add_freesync_modes(connector, drm_edid); 8878 } 8879 amdgpu_dm_fbc_init(connector); 8880 8881 return amdgpu_dm_connector->num_modes; 8882 } 8883 8884 static const u32 supported_colorspaces = 8885 BIT(DRM_MODE_COLORIMETRY_BT709_YCC) | 8886 BIT(DRM_MODE_COLORIMETRY_OPRGB) | 8887 BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) | 8888 BIT(DRM_MODE_COLORIMETRY_BT2020_YCC); 8889 8890 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, 8891 struct amdgpu_dm_connector *aconnector, 8892 int connector_type, 8893 struct dc_link *link, 8894 int link_index) 8895 { 8896 struct amdgpu_device *adev = drm_to_adev(dm->ddev); 8897 8898 /* 8899 * Some of the properties below require access to state, like bpc. 8900 * Allocate some default initial connector state with our reset helper. 
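 * (For amdgpu_dm connectors the reset hook allocates a fresh default
 * connector state, which is what the max_bpc and underscan property
 * defaults below are written into.)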
8901 */ 8902 if (aconnector->base.funcs->reset) 8903 aconnector->base.funcs->reset(&aconnector->base); 8904 8905 aconnector->connector_id = link_index; 8906 aconnector->bl_idx = -1; 8907 aconnector->dc_link = link; 8908 aconnector->base.interlace_allowed = false; 8909 aconnector->base.doublescan_allowed = false; 8910 aconnector->base.stereo_allowed = false; 8911 aconnector->base.dpms = DRM_MODE_DPMS_OFF; 8912 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ 8913 aconnector->audio_inst = -1; 8914 aconnector->pack_sdp_v1_3 = false; 8915 aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE; 8916 memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info)); 8917 mutex_init(&aconnector->hpd_lock); 8918 mutex_init(&aconnector->handle_mst_msg_ready); 8919 8920 /* 8921 * If HDMI HPD debounce delay is set, use the minimum between selected 8922 * value and AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS 8923 */ 8924 if (amdgpu_hdmi_hpd_debounce_delay_ms) { 8925 aconnector->hdmi_hpd_debounce_delay_ms = min(amdgpu_hdmi_hpd_debounce_delay_ms, 8926 AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS); 8927 INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work); 8928 aconnector->hdmi_prev_sink = NULL; 8929 } else { 8930 aconnector->hdmi_hpd_debounce_delay_ms = 0; 8931 } 8932 8933 /* 8934 * configure support HPD hot plug connector_>polled default value is 0 8935 * which means HPD hot plug not supported 8936 */ 8937 switch (connector_type) { 8938 case DRM_MODE_CONNECTOR_HDMIA: 8939 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8940 aconnector->base.ycbcr_420_allowed = 8941 link->link_enc->features.hdmi_ycbcr420_supported ? true : false; 8942 break; 8943 case DRM_MODE_CONNECTOR_DisplayPort: 8944 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8945 link->link_enc = link_enc_cfg_get_link_enc(link); 8946 ASSERT(link->link_enc); 8947 if (link->link_enc) 8948 aconnector->base.ycbcr_420_allowed = 8949 link->link_enc->features.dp_ycbcr420_supported ? true : false; 8950 break; 8951 case DRM_MODE_CONNECTOR_DVID: 8952 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8953 break; 8954 case DRM_MODE_CONNECTOR_DVII: 8955 case DRM_MODE_CONNECTOR_VGA: 8956 aconnector->base.polled = 8957 DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; 8958 break; 8959 default: 8960 break; 8961 } 8962 8963 drm_object_attach_property(&aconnector->base.base, 8964 dm->ddev->mode_config.scaling_mode_property, 8965 DRM_MODE_SCALE_NONE); 8966 8967 if (connector_type == DRM_MODE_CONNECTOR_HDMIA 8968 || (connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root)) 8969 drm_connector_attach_broadcast_rgb_property(&aconnector->base); 8970 8971 drm_object_attach_property(&aconnector->base.base, 8972 adev->mode_info.underscan_property, 8973 UNDERSCAN_OFF); 8974 drm_object_attach_property(&aconnector->base.base, 8975 adev->mode_info.underscan_hborder_property, 8976 0); 8977 drm_object_attach_property(&aconnector->base.base, 8978 adev->mode_info.underscan_vborder_property, 8979 0); 8980 8981 if (!aconnector->mst_root) 8982 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); 8983 8984 aconnector->base.state->max_bpc = 16; 8985 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; 8986 8987 if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { 8988 /* Content Type is currently only implemented for HDMI. 
*/ 8989 drm_connector_attach_content_type_property(&aconnector->base); 8990 } 8991 8992 if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { 8993 if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces)) 8994 drm_connector_attach_colorspace_property(&aconnector->base); 8995 } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) || 8996 connector_type == DRM_MODE_CONNECTOR_eDP) { 8997 if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces)) 8998 drm_connector_attach_colorspace_property(&aconnector->base); 8999 } 9000 9001 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 9002 connector_type == DRM_MODE_CONNECTOR_DisplayPort || 9003 connector_type == DRM_MODE_CONNECTOR_eDP) { 9004 drm_connector_attach_hdr_output_metadata_property(&aconnector->base); 9005 9006 if (!aconnector->mst_root) 9007 drm_connector_attach_vrr_capable_property(&aconnector->base); 9008 9009 if (adev->dm.hdcp_workqueue) 9010 drm_connector_attach_content_protection_property(&aconnector->base, true); 9011 } 9012 9013 if (connector_type == DRM_MODE_CONNECTOR_eDP) { 9014 struct drm_privacy_screen *privacy_screen; 9015 9016 privacy_screen = drm_privacy_screen_get(adev_to_drm(adev)->dev, NULL); 9017 if (!IS_ERR(privacy_screen)) { 9018 drm_connector_attach_privacy_screen_provider(&aconnector->base, 9019 privacy_screen); 9020 } else if (PTR_ERR(privacy_screen) != -ENODEV) { 9021 drm_warn(adev_to_drm(adev), "Error getting privacy-screen\n"); 9022 } 9023 } 9024 } 9025 9026 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 9027 struct i2c_msg *msgs, int num) 9028 { 9029 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 9030 struct ddc_service *ddc_service = i2c->ddc_service; 9031 struct i2c_command cmd; 9032 int i; 9033 int result = -EIO; 9034 9035 if (!ddc_service->ddc_pin) 9036 return result; 9037 9038 cmd.payloads = kzalloc_objs(struct i2c_payload, num); 9039 9040 if (!cmd.payloads) 9041 return result; 9042 9043 cmd.number_of_payloads = num; 9044 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 9045 cmd.speed = 100; 9046 9047 for (i = 0; i < num; i++) { 9048 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 9049 cmd.payloads[i].address = msgs[i].addr; 9050 cmd.payloads[i].length = msgs[i].len; 9051 cmd.payloads[i].data = msgs[i].buf; 9052 } 9053 9054 if (i2c->oem) { 9055 if (dc_submit_i2c_oem( 9056 ddc_service->ctx->dc, 9057 &cmd)) 9058 result = num; 9059 } else { 9060 if (dc_submit_i2c( 9061 ddc_service->ctx->dc, 9062 ddc_service->link->link_index, 9063 &cmd)) 9064 result = num; 9065 } 9066 9067 kfree(cmd.payloads); 9068 return result; 9069 } 9070 9071 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 9072 { 9073 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 9074 } 9075 9076 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 9077 .master_xfer = amdgpu_dm_i2c_xfer, 9078 .functionality = amdgpu_dm_i2c_func, 9079 }; 9080 9081 static struct amdgpu_i2c_adapter * 9082 create_i2c(struct ddc_service *ddc_service, bool oem) 9083 { 9084 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 9085 struct amdgpu_i2c_adapter *i2c; 9086 9087 i2c = kzalloc_obj(struct amdgpu_i2c_adapter); 9088 if (!i2c) 9089 return NULL; 9090 i2c->base.owner = THIS_MODULE; 9091 i2c->base.dev.parent = &adev->pdev->dev; 9092 i2c->base.algo = &amdgpu_dm_i2c_algo; 9093 if (oem) 9094 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c OEM bus"); 9095 else 9096 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", 
9097 ddc_service->link->link_index); 9098 i2c_set_adapdata(&i2c->base, i2c); 9099 i2c->ddc_service = ddc_service; 9100 i2c->oem = oem; 9101 9102 return i2c; 9103 } 9104 9105 int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector) 9106 { 9107 struct cec_connector_info conn_info; 9108 struct drm_device *ddev = aconnector->base.dev; 9109 struct device *hdmi_dev = ddev->dev; 9110 9111 if (amdgpu_dc_debug_mask & DC_DISABLE_HDMI_CEC) { 9112 drm_info(ddev, "HDMI-CEC feature masked\n"); 9113 return -EINVAL; 9114 } 9115 9116 cec_fill_conn_info_from_drm(&conn_info, &aconnector->base); 9117 aconnector->notifier = 9118 cec_notifier_conn_register(hdmi_dev, NULL, &conn_info); 9119 if (!aconnector->notifier) { 9120 drm_err(ddev, "Failed to create cec notifier\n"); 9121 return -ENOMEM; 9122 } 9123 9124 return 0; 9125 } 9126 9127 /* 9128 * Note: this function assumes that dc_link_detect() was called for the 9129 * dc_link which will be represented by this aconnector. 9130 */ 9131 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 9132 struct amdgpu_dm_connector *aconnector, 9133 u32 link_index, 9134 struct amdgpu_encoder *aencoder) 9135 { 9136 int res = 0; 9137 int connector_type; 9138 struct dc *dc = dm->dc; 9139 struct dc_link *link = dc_get_link_at_index(dc, link_index); 9140 struct amdgpu_i2c_adapter *i2c; 9141 9142 /* Not needed for writeback connector */ 9143 link->priv = aconnector; 9144 9145 9146 i2c = create_i2c(link->ddc, false); 9147 if (!i2c) { 9148 drm_err(adev_to_drm(dm->adev), "Failed to create i2c adapter data\n"); 9149 return -ENOMEM; 9150 } 9151 9152 aconnector->i2c = i2c; 9153 res = devm_i2c_add_adapter(dm->adev->dev, &i2c->base); 9154 9155 if (res) { 9156 drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index); 9157 goto out_free; 9158 } 9159 9160 connector_type = to_drm_connector_type(link->connector_signal, link->link_id.id); 9161 9162 res = drm_connector_init_with_ddc( 9163 dm->ddev, 9164 &aconnector->base, 9165 &amdgpu_dm_connector_funcs, 9166 connector_type, 9167 &i2c->base); 9168 9169 if (res) { 9170 drm_err(adev_to_drm(dm->adev), "connector_init failed\n"); 9171 aconnector->connector_id = -1; 9172 goto out_free; 9173 } 9174 9175 drm_connector_helper_add( 9176 &aconnector->base, 9177 &amdgpu_dm_connector_helper_funcs); 9178 9179 amdgpu_dm_connector_init_helper( 9180 dm, 9181 aconnector, 9182 connector_type, 9183 link, 9184 link_index); 9185 9186 drm_connector_attach_encoder( 9187 &aconnector->base, &aencoder->base); 9188 9189 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 9190 connector_type == DRM_MODE_CONNECTOR_HDMIB) 9191 amdgpu_dm_initialize_hdmi_connector(aconnector); 9192 9193 if (dc_is_dp_signal(link->connector_signal)) 9194 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); 9195 9196 out_free: 9197 if (res) { 9198 kfree(i2c); 9199 aconnector->i2c = NULL; 9200 } 9201 return res; 9202 } 9203 9204 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 9205 { 9206 switch (adev->mode_info.num_crtc) { 9207 case 1: 9208 return 0x1; 9209 case 2: 9210 return 0x3; 9211 case 3: 9212 return 0x7; 9213 case 4: 9214 return 0xf; 9215 case 5: 9216 return 0x1f; 9217 case 6: 9218 default: 9219 return 0x3f; 9220 } 9221 } 9222 9223 static int amdgpu_dm_encoder_init(struct drm_device *dev, 9224 struct amdgpu_encoder *aencoder, 9225 uint32_t link_index) 9226 { 9227 struct amdgpu_device *adev = drm_to_adev(dev); 9228 9229 int res = drm_encoder_init(dev, 9230 &aencoder->base, 9231 
&amdgpu_dm_encoder_funcs, 9232 DRM_MODE_ENCODER_TMDS, 9233 NULL); 9234 9235 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 9236 9237 if (!res) 9238 aencoder->encoder_id = link_index; 9239 else 9240 aencoder->encoder_id = -1; 9241 9242 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 9243 9244 return res; 9245 } 9246 9247 static void manage_dm_interrupts(struct amdgpu_device *adev, 9248 struct amdgpu_crtc *acrtc, 9249 struct dm_crtc_state *acrtc_state) 9250 { /* 9251 * We cannot be sure that the frontend index maps to the same 9252 * backend index - some even map to more than one. 9253 * So we have to go through the CRTC to find the right IRQ. 9254 */ 9255 int irq_type = amdgpu_display_crtc_idx_to_irq_type( 9256 adev, 9257 acrtc->crtc_id); 9258 struct drm_device *dev = adev_to_drm(adev); 9259 9260 struct drm_vblank_crtc_config config = {0}; 9261 struct dc_crtc_timing *timing; 9262 int offdelay; 9263 9264 if (acrtc_state) { 9265 timing = &acrtc_state->stream->timing; 9266 9267 /* 9268 * Depending on when the HW latching event of double-buffered 9269 * registers happen relative to the PSR SDP deadline, and how 9270 * bad the Panel clock has drifted since the last ALPM off 9271 * event, there can be up to 3 frames of delay between sending 9272 * the PSR exit cmd to DMUB fw, and when the panel starts 9273 * displaying live frames. 9274 * 9275 * We can set: 9276 * 9277 * 20/100 * offdelay_ms = 3_frames_ms 9278 * => offdelay_ms = 5 * 3_frames_ms 9279 * 9280 * This ensures that `3_frames_ms` will only be experienced as a 9281 * 20% delay on top how long the display has been static, and 9282 * thus make the delay less perceivable. 9283 */ 9284 if (acrtc_state->stream->link->psr_settings.psr_version < 9285 DC_PSR_VERSION_UNSUPPORTED) { 9286 offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 * 9287 timing->v_total * 9288 timing->h_total, 9289 timing->pix_clk_100hz); 9290 config.offdelay_ms = offdelay ?: 30; 9291 } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) < 9292 IP_VERSION(3, 5, 0) || 9293 !(adev->flags & AMD_IS_APU)) { 9294 /* 9295 * Older HW and DGPU have issues with instant off; 9296 * use a 2 frame offdelay. 
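 *
 * Rough worked example (illustrative 1080p60-style numbers, not taken
 * from any particular panel): with v_total = 1125, h_total = 2200 and
 * pix_clk_100hz = 1485000, one frame is ~16.7 ms, so the 2-frame
 * offdelay computed below comes out at ~33 ms.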
9297 */ 9298 offdelay = DIV64_U64_ROUND_UP((u64)20 * 9299 timing->v_total * 9300 timing->h_total, 9301 timing->pix_clk_100hz); 9302 9303 config.offdelay_ms = offdelay ?: 30; 9304 } else { 9305 /* offdelay_ms = 0 will never disable vblank */ 9306 config.offdelay_ms = 1; 9307 config.disable_immediate = true; 9308 } 9309 9310 drm_crtc_vblank_on_config(&acrtc->base, 9311 &config); 9312 /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/ 9313 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 9314 case IP_VERSION(3, 0, 0): 9315 case IP_VERSION(3, 0, 2): 9316 case IP_VERSION(3, 0, 3): 9317 case IP_VERSION(3, 2, 0): 9318 if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type)) 9319 drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n"); 9320 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 9321 if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type)) 9322 drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n"); 9323 #endif 9324 } 9325 9326 } else { 9327 /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/ 9328 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 9329 case IP_VERSION(3, 0, 0): 9330 case IP_VERSION(3, 0, 2): 9331 case IP_VERSION(3, 0, 3): 9332 case IP_VERSION(3, 2, 0): 9333 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 9334 if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type)) 9335 drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n"); 9336 #endif 9337 if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type)) 9338 drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n"); 9339 } 9340 9341 drm_crtc_vblank_off(&acrtc->base); 9342 } 9343 } 9344 9345 static void dm_update_pflip_irq_state(struct amdgpu_device *adev, 9346 struct amdgpu_crtc *acrtc) 9347 { 9348 int irq_type = 9349 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); 9350 9351 /** 9352 * This reads the current state for the IRQ and force reapplies 9353 * the setting to hardware. 
9354 */ 9355 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type); 9356 } 9357 9358 static bool 9359 is_scaling_state_different(const struct dm_connector_state *dm_state, 9360 const struct dm_connector_state *old_dm_state) 9361 { 9362 if (dm_state->scaling != old_dm_state->scaling) 9363 return true; 9364 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { 9365 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) 9366 return true; 9367 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { 9368 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) 9369 return true; 9370 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || 9371 dm_state->underscan_vborder != old_dm_state->underscan_vborder) 9372 return true; 9373 return false; 9374 } 9375 9376 static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, 9377 struct drm_crtc_state *old_crtc_state, 9378 struct drm_connector_state *new_conn_state, 9379 struct drm_connector_state *old_conn_state, 9380 const struct drm_connector *connector, 9381 struct hdcp_workqueue *hdcp_w) 9382 { 9383 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 9384 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 9385 9386 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 9387 connector->index, connector->status, connector->dpms); 9388 pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 9389 old_conn_state->content_protection, new_conn_state->content_protection); 9390 9391 if (old_crtc_state) 9392 pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 9393 old_crtc_state->enable, 9394 old_crtc_state->active, 9395 old_crtc_state->mode_changed, 9396 old_crtc_state->active_changed, 9397 old_crtc_state->connectors_changed); 9398 9399 if (new_crtc_state) 9400 pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 9401 new_crtc_state->enable, 9402 new_crtc_state->active, 9403 new_crtc_state->mode_changed, 9404 new_crtc_state->active_changed, 9405 new_crtc_state->connectors_changed); 9406 9407 /* hdcp content type change */ 9408 if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && 9409 new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 9410 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 9411 pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); 9412 return true; 9413 } 9414 9415 /* CP is being re enabled, ignore this */ 9416 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && 9417 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 9418 if (new_crtc_state && new_crtc_state->mode_changed) { 9419 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 9420 pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); 9421 return true; 9422 } 9423 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; 9424 pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); 9425 return false; 9426 } 9427 9428 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED 9429 * 9430 * Handles: UNDESIRED -> ENABLED 9431 */ 9432 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 9433 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 9434 
new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 9435 9436 /* Stream removed and re-enabled 9437 * 9438 * Can sometimes overlap with the HPD case, 9439 * thus set update_hdcp to false to avoid 9440 * setting HDCP multiple times. 9441 * 9442 * Handles: DESIRED -> DESIRED (Special case) 9443 */ 9444 if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && 9445 new_conn_state->crtc && new_conn_state->crtc->enabled && 9446 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 9447 dm_con_state->update_hdcp = false; 9448 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", 9449 __func__); 9450 return true; 9451 } 9452 9453 /* Hot-plug, headless s3, dpms 9454 * 9455 * Only start HDCP if the display is connected/enabled. 9456 * update_hdcp flag will be set to false until the next 9457 * HPD comes in. 9458 * 9459 * Handles: DESIRED -> DESIRED (Special case) 9460 */ 9461 if (dm_con_state->update_hdcp && 9462 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 9463 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 9464 dm_con_state->update_hdcp = false; 9465 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", 9466 __func__); 9467 return true; 9468 } 9469 9470 if (old_conn_state->content_protection == new_conn_state->content_protection) { 9471 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { 9472 if (new_crtc_state && new_crtc_state->mode_changed) { 9473 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", 9474 __func__); 9475 return true; 9476 } 9477 pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", 9478 __func__); 9479 return false; 9480 } 9481 9482 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); 9483 return false; 9484 } 9485 9486 if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { 9487 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", 9488 __func__); 9489 return true; 9490 } 9491 9492 pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); 9493 return false; 9494 } 9495 9496 static void remove_stream(struct amdgpu_device *adev, 9497 struct amdgpu_crtc *acrtc, 9498 struct dc_stream_state *stream) 9499 { 9500 /* this is the update mode case */ 9501 9502 acrtc->otg_inst = -1; 9503 acrtc->enabled = false; 9504 } 9505 9506 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 9507 { 9508 9509 assert_spin_locked(&acrtc->base.dev->event_lock); 9510 WARN_ON(acrtc->event); 9511 9512 acrtc->event = acrtc->base.state->event; 9513 9514 /* Set the flip status */ 9515 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 9516 9517 /* Mark this event as consumed */ 9518 acrtc->base.state->event = NULL; 9519 9520 drm_dbg_state(acrtc->base.dev, 9521 "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 9522 acrtc->crtc_id); 9523 } 9524 9525 static void update_freesync_state_on_stream( 9526 struct amdgpu_display_manager *dm, 9527 struct dm_crtc_state *new_crtc_state, 9528 struct dc_stream_state *new_stream, 9529 struct dc_plane_state *surface, 9530 u32 flip_timestamp_in_us) 9531 { 9532 struct mod_vrr_params vrr_params; 9533 struct dc_info_packet vrr_infopacket = {0}; 9534 struct amdgpu_device *adev = dm->adev; 9535 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 9536 unsigned long flags; 9537 bool pack_sdp_v1_3 = false; 9538 struct amdgpu_dm_connector *aconn; 9539 enum 
vrr_packet_type packet_type = PACKET_TYPE_VRR; 9540 9541 if (!new_stream) 9542 return; 9543 9544 /* 9545 * TODO: Determine why min/max totals and vrefresh can be 0 here. 9546 * For now it's sufficient to just guard against these conditions. 9547 */ 9548 9549 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 9550 return; 9551 9552 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 9553 vrr_params = acrtc->dm_irq_params.vrr_params; 9554 9555 if (surface) { 9556 mod_freesync_handle_preflip( 9557 dm->freesync_module, 9558 surface, 9559 new_stream, 9560 flip_timestamp_in_us, 9561 &vrr_params); 9562 9563 if (adev->family < AMDGPU_FAMILY_AI && 9564 amdgpu_dm_crtc_vrr_active(new_crtc_state)) { 9565 mod_freesync_handle_v_update(dm->freesync_module, 9566 new_stream, &vrr_params); 9567 9568 /* Need to call this before the frame ends. */ 9569 dc_stream_adjust_vmin_vmax(dm->dc, 9570 new_crtc_state->stream, 9571 &vrr_params.adjust); 9572 } 9573 } 9574 9575 aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; 9576 9577 if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) { 9578 pack_sdp_v1_3 = aconn->pack_sdp_v1_3; 9579 9580 if (aconn->vsdb_info.amd_vsdb_version == 1) 9581 packet_type = PACKET_TYPE_FS_V1; 9582 else if (aconn->vsdb_info.amd_vsdb_version == 2) 9583 packet_type = PACKET_TYPE_FS_V2; 9584 else if (aconn->vsdb_info.amd_vsdb_version == 3) 9585 packet_type = PACKET_TYPE_FS_V3; 9586 9587 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, 9588 &new_stream->adaptive_sync_infopacket); 9589 } 9590 9591 mod_freesync_build_vrr_infopacket( 9592 dm->freesync_module, 9593 new_stream, 9594 &vrr_params, 9595 packet_type, 9596 TRANSFER_FUNC_UNKNOWN, 9597 &vrr_infopacket, 9598 pack_sdp_v1_3); 9599 9600 new_crtc_state->freesync_vrr_info_changed |= 9601 (memcmp(&new_crtc_state->vrr_infopacket, 9602 &vrr_infopacket, 9603 sizeof(vrr_infopacket)) != 0); 9604 9605 acrtc->dm_irq_params.vrr_params = vrr_params; 9606 new_crtc_state->vrr_infopacket = vrr_infopacket; 9607 9608 new_stream->vrr_infopacket = vrr_infopacket; 9609 new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params); 9610 9611 if (new_crtc_state->freesync_vrr_info_changed) 9612 drm_dbg_kms(adev_to_drm(adev), "VRR packet update: crtc=%u enabled=%d state=%d", 9613 new_crtc_state->base.crtc->base.id, 9614 (int)new_crtc_state->base.vrr_enabled, 9615 (int)vrr_params.state); 9616 9617 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9618 } 9619 9620 static void update_stream_irq_parameters( 9621 struct amdgpu_display_manager *dm, 9622 struct dm_crtc_state *new_crtc_state) 9623 { 9624 struct dc_stream_state *new_stream = new_crtc_state->stream; 9625 struct mod_vrr_params vrr_params; 9626 struct mod_freesync_config config = new_crtc_state->freesync_config; 9627 struct amdgpu_device *adev = dm->adev; 9628 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 9629 unsigned long flags; 9630 9631 if (!new_stream) 9632 return; 9633 9634 /* 9635 * TODO: Determine why min/max totals and vrefresh can be 0 here. 9636 * For now it's sufficient to just guard against these conditions. 
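 * (A zeroed h_total/v_total would break the refresh-rate math further
 * down, which divides by these totals, so bail out early.)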
9637 */ 9638 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 9639 return; 9640 9641 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 9642 vrr_params = acrtc->dm_irq_params.vrr_params; 9643 9644 if (new_crtc_state->vrr_supported && 9645 config.min_refresh_in_uhz && 9646 config.max_refresh_in_uhz) { 9647 /* 9648 * if freesync compatible mode was set, config.state will be set 9649 * in atomic check 9650 */ 9651 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && 9652 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || 9653 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { 9654 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; 9655 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; 9656 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; 9657 vrr_params.state = VRR_STATE_ACTIVE_FIXED; 9658 } else { 9659 config.state = new_crtc_state->base.vrr_enabled ? 9660 VRR_STATE_ACTIVE_VARIABLE : 9661 VRR_STATE_INACTIVE; 9662 } 9663 } else { 9664 config.state = VRR_STATE_UNSUPPORTED; 9665 } 9666 9667 mod_freesync_build_vrr_params(dm->freesync_module, 9668 new_stream, 9669 &config, &vrr_params); 9670 9671 new_crtc_state->freesync_config = config; 9672 /* Copy state for access from DM IRQ handler */ 9673 acrtc->dm_irq_params.freesync_config = config; 9674 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes; 9675 acrtc->dm_irq_params.vrr_params = vrr_params; 9676 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9677 } 9678 9679 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, 9680 struct dm_crtc_state *new_state) 9681 { 9682 bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state); 9683 bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state); 9684 9685 if (!old_vrr_active && new_vrr_active) { 9686 /* Transition VRR inactive -> active: 9687 * While VRR is active, we must not disable vblank irq, as a 9688 * reenable after disable would compute bogus vblank/pflip 9689 * timestamps if it likely happened inside display front-porch. 9690 * 9691 * We also need vupdate irq for the actual core vblank handling 9692 * at end of vblank. 9693 */ 9694 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0); 9695 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); 9696 drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n", 9697 __func__, new_state->base.crtc->base.id); 9698 } else if (old_vrr_active && !new_vrr_active) { 9699 /* Transition VRR active -> inactive: 9700 * Allow vblank irq disable again for fixed refresh rate. 9701 */ 9702 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0); 9703 drm_crtc_vblank_put(new_state->base.crtc); 9704 drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n", 9705 __func__, new_state->base.crtc->base.id); 9706 } 9707 } 9708 9709 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) 9710 { 9711 struct drm_plane *plane; 9712 struct drm_plane_state *old_plane_state; 9713 int i; 9714 9715 /* 9716 * TODO: Make this per-stream so we don't issue redundant updates for 9717 * commits with multiple streams. 
9718 */ 9719 for_each_old_plane_in_state(state, plane, old_plane_state, i) 9720 if (plane->type == DRM_PLANE_TYPE_CURSOR) 9721 amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state); 9722 } 9723 9724 static inline uint32_t get_mem_type(struct drm_framebuffer *fb) 9725 { 9726 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); 9727 9728 return abo->tbo.resource ? abo->tbo.resource->mem_type : 0; 9729 } 9730 9731 static void amdgpu_dm_update_cursor(struct drm_plane *plane, 9732 struct drm_plane_state *old_plane_state, 9733 struct dc_stream_update *update) 9734 { 9735 struct amdgpu_device *adev = drm_to_adev(plane->dev); 9736 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 9737 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 9738 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 9739 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 9740 uint64_t address = afb ? afb->address : 0; 9741 struct dc_cursor_position position = {0}; 9742 struct dc_cursor_attributes attributes; 9743 int ret; 9744 9745 if (!plane->state->fb && !old_plane_state->fb) 9746 return; 9747 9748 drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n", 9749 amdgpu_crtc->crtc_id, plane->state->crtc_w, 9750 plane->state->crtc_h); 9751 9752 ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); 9753 if (ret) 9754 return; 9755 9756 if (!position.enable) { 9757 /* turn off cursor */ 9758 if (crtc_state && crtc_state->stream) { 9759 dc_stream_set_cursor_position(crtc_state->stream, 9760 &position); 9761 update->cursor_position = &crtc_state->stream->cursor_position; 9762 } 9763 return; 9764 } 9765 9766 amdgpu_crtc->cursor_width = plane->state->crtc_w; 9767 amdgpu_crtc->cursor_height = plane->state->crtc_h; 9768 9769 memset(&attributes, 0, sizeof(attributes)); 9770 attributes.address.high_part = upper_32_bits(address); 9771 attributes.address.low_part = lower_32_bits(address); 9772 attributes.width = plane->state->crtc_w; 9773 attributes.height = plane->state->crtc_h; 9774 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 9775 attributes.rotation_angle = 0; 9776 attributes.attribute_flags.value = 0; 9777 9778 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM 9779 * legacy gamma setup. 
9780 */ 9781 if (crtc_state->cm_is_degamma_srgb && 9782 adev->dm.dc->caps.color.dpp.gamma_corr) 9783 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; 9784 9785 if (afb) 9786 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; 9787 9788 if (crtc_state->stream) { 9789 if (!dc_stream_set_cursor_attributes(crtc_state->stream, 9790 &attributes)) 9791 drm_err(adev_to_drm(adev), "DC failed to set cursor attributes\n"); 9792 9793 update->cursor_attributes = &crtc_state->stream->cursor_attributes; 9794 9795 if (!dc_stream_set_cursor_position(crtc_state->stream, 9796 &position)) 9797 drm_err(adev_to_drm(adev), "DC failed to set cursor position\n"); 9798 9799 update->cursor_position = &crtc_state->stream->cursor_position; 9800 } 9801 } 9802 9803 static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach, 9804 const struct dm_crtc_state *acrtc_state, 9805 const u64 current_ts) 9806 { 9807 struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; 9808 struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; 9809 struct amdgpu_dm_connector *aconn = 9810 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; 9811 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); 9812 9813 if (acrtc_state->update_type > UPDATE_TYPE_FAST) { 9814 if (pr->config.replay_supported && !pr->replay_feature_enabled) 9815 amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn); 9816 else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED && 9817 !psr->psr_feature_enabled) 9818 if (!aconn->disallow_edp_enter_psr) 9819 amdgpu_dm_link_setup_psr(acrtc_state->stream); 9820 } 9821 9822 /* Decrement skip count when SR is enabled and we're doing fast updates. */ 9823 if (acrtc_state->update_type == UPDATE_TYPE_FAST && 9824 (psr->psr_feature_enabled || pr->config.replay_supported)) { 9825 if (aconn->sr_skip_count > 0) 9826 aconn->sr_skip_count--; 9827 9828 /* Allow SR when skip count is 0. */ 9829 acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count; 9830 9831 /* 9832 * If sink supports PSR SU/Panel Replay, there is no need to rely on 9833 * a vblank event disable request to enable PSR/RP. PSR SU/RP 9834 * can be enabled immediately once OS demonstrates an 9835 * adequate number of fast atomic commits to notify KMD 9836 * of update events. See `vblank_control_worker()`. 
9837 */ 9838 if (!vrr_active && 9839 acrtc_attach->dm_irq_params.allow_sr_entry && 9840 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 9841 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && 9842 #endif 9843 (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) { 9844 if (pr->replay_feature_enabled && !pr->replay_allow_active) 9845 amdgpu_dm_replay_enable(acrtc_state->stream, true); 9846 if (psr->psr_version == DC_PSR_VERSION_SU_1 && 9847 !psr->psr_allow_active && !aconn->disallow_edp_enter_psr) 9848 amdgpu_dm_psr_enable(acrtc_state->stream); 9849 } 9850 } else { 9851 acrtc_attach->dm_irq_params.allow_sr_entry = false; 9852 } 9853 } 9854 9855 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 9856 struct drm_device *dev, 9857 struct amdgpu_display_manager *dm, 9858 struct drm_crtc *pcrtc, 9859 bool wait_for_vblank) 9860 { 9861 u32 i; 9862 u64 timestamp_ns = ktime_get_ns(); 9863 struct drm_plane *plane; 9864 struct drm_plane_state *old_plane_state, *new_plane_state; 9865 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 9866 struct drm_crtc_state *new_pcrtc_state = 9867 drm_atomic_get_new_crtc_state(state, pcrtc); 9868 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 9869 struct dm_crtc_state *dm_old_crtc_state = 9870 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 9871 int planes_count = 0, vpos, hpos; 9872 unsigned long flags; 9873 u32 target_vblank, last_flip_vblank; 9874 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); 9875 bool cursor_update = false; 9876 bool pflip_present = false; 9877 bool dirty_rects_changed = false; 9878 bool updated_planes_and_streams = false; 9879 struct { 9880 struct dc_surface_update surface_updates[MAX_SURFACES]; 9881 struct dc_plane_info plane_infos[MAX_SURFACES]; 9882 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 9883 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 9884 struct dc_stream_update stream_update; 9885 } *bundle; 9886 9887 bundle = kzalloc_obj(*bundle); 9888 9889 if (!bundle) { 9890 drm_err(dev, "Failed to allocate update bundle\n"); 9891 goto cleanup; 9892 } 9893 9894 /* 9895 * Disable the cursor first if we're disabling all the planes. 9896 * It'll remain on the screen after the planes are re-enabled 9897 * if we don't. 9898 * 9899 * If the cursor is transitioning from native to overlay mode, the 9900 * native cursor needs to be disabled first. 
9901 */ 9902 if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE && 9903 dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { 9904 struct dc_cursor_position cursor_position = {0}; 9905 9906 if (!dc_stream_set_cursor_position(acrtc_state->stream, 9907 &cursor_position)) 9908 drm_err(dev, "DC failed to disable native cursor\n"); 9909 9910 bundle->stream_update.cursor_position = 9911 &acrtc_state->stream->cursor_position; 9912 } 9913 9914 if (acrtc_state->active_planes == 0 && 9915 dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) 9916 amdgpu_dm_commit_cursors(state); 9917 9918 /* update planes when needed */ 9919 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 9920 struct drm_crtc *crtc = new_plane_state->crtc; 9921 struct drm_crtc_state *new_crtc_state; 9922 struct drm_framebuffer *fb = new_plane_state->fb; 9923 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb; 9924 bool plane_needs_flip; 9925 struct dc_plane_state *dc_plane; 9926 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 9927 9928 /* Cursor plane is handled after stream updates */ 9929 if (plane->type == DRM_PLANE_TYPE_CURSOR && 9930 acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { 9931 if ((fb && crtc == pcrtc) || 9932 (old_plane_state->fb && old_plane_state->crtc == pcrtc)) { 9933 cursor_update = true; 9934 if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) != 0) 9935 amdgpu_dm_update_cursor(plane, old_plane_state, &bundle->stream_update); 9936 } 9937 9938 continue; 9939 } 9940 9941 if (!fb || !crtc || pcrtc != crtc) 9942 continue; 9943 9944 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 9945 if (!new_crtc_state->active) 9946 continue; 9947 9948 dc_plane = dm_new_plane_state->dc_state; 9949 if (!dc_plane) 9950 continue; 9951 9952 bundle->surface_updates[planes_count].surface = dc_plane; 9953 if (new_pcrtc_state->color_mgmt_changed) { 9954 bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction; 9955 bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func; 9956 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; 9957 bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult; 9958 bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func; 9959 bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func; 9960 bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf; 9961 } 9962 9963 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, 9964 &bundle->scaling_infos[planes_count]); 9965 9966 bundle->surface_updates[planes_count].scaling_info = 9967 &bundle->scaling_infos[planes_count]; 9968 9969 plane_needs_flip = old_plane_state->fb && new_plane_state->fb; 9970 9971 pflip_present = pflip_present || plane_needs_flip; 9972 9973 if (!plane_needs_flip) { 9974 planes_count += 1; 9975 continue; 9976 } 9977 9978 fill_dc_plane_info_and_addr( 9979 dm->adev, new_plane_state, 9980 afb->tiling_flags, 9981 &bundle->plane_infos[planes_count], 9982 &bundle->flip_addrs[planes_count].address, 9983 afb->tmz_surface); 9984 9985 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n", 9986 new_plane_state->plane->index, 9987 bundle->plane_infos[planes_count].dcc.enable); 9988 9989 bundle->surface_updates[planes_count].plane_info = 9990 &bundle->plane_infos[planes_count]; 9991 9992 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled || 9993 
acrtc_state->stream->link->replay_settings.replay_feature_enabled) { 9994 fill_dc_dirty_rects(plane, old_plane_state, 9995 new_plane_state, new_crtc_state, 9996 &bundle->flip_addrs[planes_count], 9997 acrtc_state->stream->link->psr_settings.psr_version == 9998 DC_PSR_VERSION_SU_1, 9999 &dirty_rects_changed); 10000 10001 /* 10002 * If the dirty regions changed, PSR-SU needs to be disabled temporarily 10003 * and re-enabled once the dirty regions are stable, to avoid video glitches. 10004 * PSR-SU will be re-enabled in vblank_control_worker() if the user pauses 10005 * the video while PSR-SU is disabled. 10006 */ 10007 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && 10008 acrtc_attach->dm_irq_params.allow_sr_entry && 10009 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 10010 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && 10011 #endif 10012 dirty_rects_changed) { 10013 mutex_lock(&dm->dc_lock); 10014 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns = 10015 timestamp_ns; 10016 if (acrtc_state->stream->link->psr_settings.psr_allow_active) 10017 amdgpu_dm_psr_disable(acrtc_state->stream, true); 10018 mutex_unlock(&dm->dc_lock); 10019 } 10020 } 10021 10022 /* 10023 * Only allow immediate flips for fast updates that don't 10024 * change memory domain, FB pitch, DCC state, rotation or 10025 * mirroring. 10026 * 10027 * dm_crtc_helper_atomic_check() only accepts async flips with 10028 * fast updates. 10029 */ 10030 if (crtc->state->async_flip && 10031 (acrtc_state->update_type != UPDATE_TYPE_FAST || 10032 get_mem_type(old_plane_state->fb) != get_mem_type(fb))) 10033 drm_warn_once(state->dev, 10034 "[PLANE:%d:%s] async flip with non-fast update\n", 10035 plane->base.id, plane->name); 10036 10037 bundle->flip_addrs[planes_count].flip_immediate = 10038 crtc->state->async_flip && 10039 acrtc_state->update_type == UPDATE_TYPE_FAST && 10040 get_mem_type(old_plane_state->fb) == get_mem_type(fb); 10041 10042 timestamp_ns = ktime_get_ns(); 10043 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); 10044 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count]; 10045 bundle->surface_updates[planes_count].surface = dc_plane; 10046 10047 if (!bundle->surface_updates[planes_count].surface) { 10048 drm_err(dev, "No surface for CRTC: id=%d\n", 10049 acrtc_attach->crtc_id); 10050 continue; 10051 } 10052 10053 if (plane == pcrtc->primary) 10054 update_freesync_state_on_stream( 10055 dm, 10056 acrtc_state, 10057 acrtc_state->stream, 10058 dc_plane, 10059 bundle->flip_addrs[planes_count].flip_timestamp_in_us); 10060 10061 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n", 10062 __func__, 10063 bundle->flip_addrs[planes_count].address.grph.addr.high_part, 10064 bundle->flip_addrs[planes_count].address.grph.addr.low_part); 10065 10066 planes_count += 1; 10067 10068 } 10069 10070 if (pflip_present) { 10071 if (!vrr_active) { 10072 /* Use old throttling in non-vrr fixed refresh rate mode 10073 * to keep flip scheduling based on target vblank counts 10074 * working in a backwards compatible way, e.g., for 10075 * clients using the GLX_OML_sync_control extension or 10076 * DRI3/Present extension with defined target_msc.
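 * (Concretely: target_vblank below is last_flip_vblank +
 * wait_for_vblank, so with wait_for_vblank == 1 at most one flip is
 * programmed per vblank interval.)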
10077 */ 10078 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); 10079 } else { 10080 /* For variable refresh rate mode only: 10081 * Get vblank of last completed flip to avoid > 1 vrr 10082 * flips per video frame by use of throttling, but allow 10083 * flip programming anywhere in the possibly large 10084 * variable vrr vblank interval for fine-grained flip 10085 * timing control and more opportunity to avoid stutter 10086 * on late submission of flips. 10087 */ 10088 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 10089 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; 10090 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 10091 } 10092 10093 target_vblank = last_flip_vblank + wait_for_vblank; 10094 10095 /* 10096 * Wait until we're out of the vertical blank period before the one 10097 * targeted by the flip 10098 */ 10099 while ((acrtc_attach->enabled && 10100 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 10101 0, &vpos, &hpos, NULL, 10102 NULL, &pcrtc->hwmode) 10103 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 10104 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 10105 (int)(target_vblank - 10106 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { 10107 usleep_range(1000, 1100); 10108 } 10109 10110 /** 10111 * Prepare the flip event for the pageflip interrupt to handle. 10112 * 10113 * This only works in the case where we've already turned on the 10114 * appropriate hardware blocks (eg. HUBP) so in the transition case 10115 * from 0 -> n planes we have to skip a hardware generated event 10116 * and rely on sending it from software. 10117 */ 10118 if (acrtc_attach->base.state->event && 10119 acrtc_state->active_planes > 0) { 10120 drm_crtc_vblank_get(pcrtc); 10121 10122 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 10123 10124 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); 10125 prepare_flip_isr(acrtc_attach); 10126 10127 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 10128 } 10129 10130 if (acrtc_state->stream) { 10131 if (acrtc_state->freesync_vrr_info_changed) 10132 bundle->stream_update.vrr_infopacket = 10133 &acrtc_state->stream->vrr_infopacket; 10134 } 10135 } else if (cursor_update && acrtc_state->active_planes > 0) { 10136 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 10137 if (acrtc_attach->base.state->event) { 10138 drm_crtc_vblank_get(pcrtc); 10139 acrtc_attach->event = acrtc_attach->base.state->event; 10140 acrtc_attach->base.state->event = NULL; 10141 } 10142 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 10143 } 10144 10145 /* Update the planes if changed or disable if we don't have any. */ 10146 if ((planes_count || acrtc_state->active_planes == 0) && 10147 acrtc_state->stream) { 10148 /* 10149 * If PSR or idle optimizations are enabled then flush out 10150 * any pending work before hardware programming. 10151 */ 10152 if (dm->vblank_control_workqueue) 10153 flush_workqueue(dm->vblank_control_workqueue); 10154 10155 bundle->stream_update.stream = acrtc_state->stream; 10156 if (new_pcrtc_state->mode_changed) { 10157 bundle->stream_update.src = acrtc_state->stream->src; 10158 bundle->stream_update.dst = acrtc_state->stream->dst; 10159 } 10160 10161 if (new_pcrtc_state->color_mgmt_changed) { 10162 /* 10163 * TODO: This isn't fully correct since we've actually 10164 * already modified the stream in place. 
10165 */ 10166 bundle->stream_update.gamut_remap = 10167 &acrtc_state->stream->gamut_remap_matrix; 10168 bundle->stream_update.output_csc_transform = 10169 &acrtc_state->stream->csc_color_matrix; 10170 bundle->stream_update.out_transfer_func = 10171 &acrtc_state->stream->out_transfer_func; 10172 bundle->stream_update.lut3d_func = 10173 (struct dc_3dlut *) acrtc_state->stream->lut3d_func; 10174 bundle->stream_update.func_shaper = 10175 (struct dc_transfer_func *) acrtc_state->stream->func_shaper; 10176 } 10177 10178 acrtc_state->stream->abm_level = acrtc_state->abm_level; 10179 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) 10180 bundle->stream_update.abm_level = &acrtc_state->abm_level; 10181 10182 mutex_lock(&dm->dc_lock); 10183 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) { 10184 if (acrtc_state->stream->link->replay_settings.replay_allow_active) 10185 amdgpu_dm_replay_disable(acrtc_state->stream); 10186 if (acrtc_state->stream->link->psr_settings.psr_allow_active) 10187 amdgpu_dm_psr_disable(acrtc_state->stream, true); 10188 } 10189 mutex_unlock(&dm->dc_lock); 10190 10191 /* 10192 * If FreeSync state on the stream has changed then we need to 10193 * re-adjust the min/max bounds now that DC doesn't handle this 10194 * as part of commit. 10195 */ 10196 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { 10197 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 10198 dc_stream_adjust_vmin_vmax( 10199 dm->dc, acrtc_state->stream, 10200 &acrtc_attach->dm_irq_params.vrr_params.adjust); 10201 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 10202 } 10203 mutex_lock(&dm->dc_lock); 10204 update_planes_and_stream_adapter(dm->dc, 10205 acrtc_state->update_type, 10206 planes_count, 10207 acrtc_state->stream, 10208 &bundle->stream_update, 10209 bundle->surface_updates); 10210 updated_planes_and_streams = true; 10211 10212 /** 10213 * Enable or disable the interrupts on the backend. 10214 * 10215 * Most pipes are put into power gating when unused. 10216 * 10217 * When power gating is enabled on a pipe we lose the 10218 * interrupt enablement state when power gating is disabled. 10219 * 10220 * So we need to update the IRQ control state in hardware 10221 * whenever the pipe turns on (since it could be previously 10222 * power gated) or off (since some pipes can't be power gated 10223 * on some ASICs). 10224 */ 10225 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) 10226 dm_update_pflip_irq_state(drm_to_adev(dev), 10227 acrtc_attach); 10228 10229 amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns); 10230 mutex_unlock(&dm->dc_lock); 10231 } 10232 10233 /* 10234 * Update cursor state *after* programming all the planes. 10235 * This avoids redundant programming in the case where we're going 10236 * to be disabling a single plane - those pipes are being disabled. 
10237 */ 10238 if (acrtc_state->active_planes && 10239 (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) && 10240 acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) 10241 amdgpu_dm_commit_cursors(state); 10242 10243 cleanup: 10244 kfree(bundle); 10245 } 10246 10247 static void amdgpu_dm_commit_audio(struct drm_device *dev, 10248 struct drm_atomic_state *state) 10249 { 10250 struct amdgpu_device *adev = drm_to_adev(dev); 10251 struct amdgpu_dm_connector *aconnector; 10252 struct drm_connector *connector; 10253 struct drm_connector_state *old_con_state, *new_con_state; 10254 struct drm_crtc_state *new_crtc_state; 10255 struct dm_crtc_state *new_dm_crtc_state; 10256 const struct dc_stream_status *status; 10257 int i, inst; 10258 10259 /* Notify device removals. */ 10260 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 10261 if (old_con_state->crtc != new_con_state->crtc) { 10262 /* CRTC changes require notification. */ 10263 goto notify; 10264 } 10265 10266 if (!new_con_state->crtc) 10267 continue; 10268 10269 new_crtc_state = drm_atomic_get_new_crtc_state( 10270 state, new_con_state->crtc); 10271 10272 if (!new_crtc_state) 10273 continue; 10274 10275 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10276 continue; 10277 10278 notify: 10279 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 10280 continue; 10281 10282 aconnector = to_amdgpu_dm_connector(connector); 10283 10284 mutex_lock(&adev->dm.audio_lock); 10285 inst = aconnector->audio_inst; 10286 aconnector->audio_inst = -1; 10287 mutex_unlock(&adev->dm.audio_lock); 10288 10289 amdgpu_dm_audio_eld_notify(adev, inst); 10290 } 10291 10292 /* Notify audio device additions. */ 10293 for_each_new_connector_in_state(state, connector, new_con_state, i) { 10294 if (!new_con_state->crtc) 10295 continue; 10296 10297 new_crtc_state = drm_atomic_get_new_crtc_state( 10298 state, new_con_state->crtc); 10299 10300 if (!new_crtc_state) 10301 continue; 10302 10303 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10304 continue; 10305 10306 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 10307 if (!new_dm_crtc_state->stream) 10308 continue; 10309 10310 status = dc_stream_get_status(new_dm_crtc_state->stream); 10311 if (!status) 10312 continue; 10313 10314 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 10315 continue; 10316 10317 aconnector = to_amdgpu_dm_connector(connector); 10318 10319 mutex_lock(&adev->dm.audio_lock); 10320 inst = status->audio_inst; 10321 aconnector->audio_inst = inst; 10322 mutex_unlock(&adev->dm.audio_lock); 10323 10324 amdgpu_dm_audio_eld_notify(adev, inst); 10325 } 10326 } 10327 10328 /* 10329 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC 10330 * @crtc_state: the DRM CRTC state 10331 * @stream_state: the DC stream state. 10332 * 10333 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring 10334 * a dc_stream_state's flags in sync with a drm_crtc_state's flags. 
10335 */ 10336 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, 10337 struct dc_stream_state *stream_state) 10338 { 10339 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); 10340 } 10341 10342 static void dm_clear_writeback(struct amdgpu_display_manager *dm, 10343 struct dm_crtc_state *crtc_state) 10344 { 10345 dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0); 10346 } 10347 10348 static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, 10349 struct dc_state *dc_state) 10350 { 10351 struct drm_device *dev = state->dev; 10352 struct amdgpu_device *adev = drm_to_adev(dev); 10353 struct amdgpu_display_manager *dm = &adev->dm; 10354 struct drm_crtc *crtc; 10355 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10356 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 10357 struct drm_connector_state *old_con_state; 10358 struct drm_connector *connector; 10359 bool mode_set_reset_required = false; 10360 u32 i; 10361 struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; 10362 bool set_backlight_level = false; 10363 10364 /* Disable writeback */ 10365 for_each_old_connector_in_state(state, connector, old_con_state, i) { 10366 struct dm_connector_state *dm_old_con_state; 10367 struct amdgpu_crtc *acrtc; 10368 10369 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 10370 continue; 10371 10372 old_crtc_state = NULL; 10373 10374 dm_old_con_state = to_dm_connector_state(old_con_state); 10375 if (!dm_old_con_state->base.crtc) 10376 continue; 10377 10378 acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc); 10379 if (acrtc) 10380 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 10381 10382 if (!acrtc || !acrtc->wb_enabled) 10383 continue; 10384 10385 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10386 10387 dm_clear_writeback(dm, dm_old_crtc_state); 10388 acrtc->wb_enabled = false; 10389 } 10390 10391 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 10392 new_crtc_state, i) { 10393 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 10394 10395 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10396 10397 if (old_crtc_state->active && 10398 (!new_crtc_state->active || 10399 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 10400 manage_dm_interrupts(adev, acrtc, NULL); 10401 dc_stream_release(dm_old_crtc_state->stream); 10402 } 10403 } 10404 10405 drm_atomic_helper_calc_timestamping_constants(state); 10406 10407 /* update changed items */ 10408 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10409 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 10410 10411 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10412 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10413 10414 drm_dbg_state(state->dev, 10415 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 10416 acrtc->crtc_id, 10417 new_crtc_state->enable, 10418 new_crtc_state->active, 10419 new_crtc_state->planes_changed, 10420 new_crtc_state->mode_changed, 10421 new_crtc_state->active_changed, 10422 new_crtc_state->connectors_changed); 10423 10424 /* Disable cursor if disabling crtc */ 10425 if (old_crtc_state->active && !new_crtc_state->active) { 10426 struct dc_cursor_position position; 10427 10428 memset(&position, 0, sizeof(position)); 10429 mutex_lock(&dm->dc_lock); 10430 dc_exit_ips_for_hw_access(dm->dc); 10431 
dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position); 10432 mutex_unlock(&dm->dc_lock); 10433 } 10434 10435 /* Copy all transient state flags into dc state */ 10436 if (dm_new_crtc_state->stream) { 10437 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base, 10438 dm_new_crtc_state->stream); 10439 } 10440 10441 /* Handle the headless hotplug case, updating new_state and 10442 * aconnector as needed 10443 */ 10444 10445 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { 10446 10447 drm_dbg_atomic(dev, 10448 "Atomic commit: SET crtc id %d: [%p]\n", 10449 acrtc->crtc_id, acrtc); 10450 10451 if (!dm_new_crtc_state->stream) { 10452 /* 10453 * This can happen when delivery of userspace 10454 * notifications lags: userspace tries to set a 10455 * mode on a display that has in fact been 10456 * disconnected, so dc_sink is NULL on the 10457 * aconnector. We expect a reset-mode request 10458 * to follow shortly. 10459 * 10460 * It can also happen when an unplug occurs 10461 * while a resume sequence is completing. 10462 * 10463 * In either case we want to pretend we still 10464 * have a sink, to keep the pipe running so that 10465 * hw state stays consistent with the sw state. 10466 */ 10467 drm_dbg_atomic(dev, 10468 "Failed to create new stream for crtc %d\n", 10469 acrtc->base.base.id); 10470 continue; 10471 } 10472 10473 if (dm_old_crtc_state->stream) 10474 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 10475 10476 pm_runtime_get_noresume(dev->dev); 10477 10478 acrtc->enabled = true; 10479 acrtc->hw_mode = new_crtc_state->mode; 10480 crtc->hwmode = new_crtc_state->mode; 10481 mode_set_reset_required = true; 10482 set_backlight_level = true; 10483 } else if (modereset_required(new_crtc_state)) { 10484 drm_dbg_atomic(dev, 10485 "Atomic commit: RESET. crtc id %d:[%p]\n", 10486 acrtc->crtc_id, acrtc); 10487 /* i.e. reset mode */ 10488 if (dm_old_crtc_state->stream) 10489 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 10490 10491 mode_set_reset_required = true; 10492 } 10493 } /* for_each_crtc_in_state() */ 10494 10495 /* If there was a mode set or reset, disable eDP PSR and Replay */ 10496 if (mode_set_reset_required) { 10497 if (dm->vblank_control_workqueue) 10498 flush_workqueue(dm->vblank_control_workqueue); 10499 10500 amdgpu_dm_replay_disable_all(dm); 10501 amdgpu_dm_psr_disable_all(dm); 10502 } 10503 10504 dm_enable_per_frame_crtc_master_sync(dc_state); 10505 mutex_lock(&dm->dc_lock); 10506 dc_exit_ips_for_hw_access(dm->dc); 10507 WARN_ON(!dc_commit_streams(dm->dc, &params)); 10508 10509 /* Allow idle optimization when vblank count is 0 for display off */ 10510 if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev)) 10511 dc_allow_idle_optimizations(dm->dc, true); 10512 mutex_unlock(&dm->dc_lock); 10513 10514 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 10515 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 10516 10517 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10518 10519 if (dm_new_crtc_state->stream != NULL) { 10520 const struct dc_stream_status *status = 10521 dc_stream_get_status(dm_new_crtc_state->stream); 10522 10523 if (!status) 10524 status = dc_state_get_stream_status(dc_state, 10525 dm_new_crtc_state->stream); 10526 if (!status) 10527 drm_err(dev, 10528 "got no status for stream %p on acrtc%p\n", 10529 dm_new_crtc_state->stream, acrtc); 10530 else 10531 acrtc->otg_inst = status->primary_otg_inst; 10532 } 10533 } 10534 10535 /* During boot up and resume the DC layer will reset the panel brightness 10536 * to fix a flicker issue. 10537 * This leaves dm->actual_brightness out of sync with the current panel brightness 10538 * level.
(the dm->brightness is the correct panel level) 10539 * So we set the backlight level with dm->brightness value after set mode 10540 */ 10541 if (set_backlight_level) { 10542 for (i = 0; i < dm->num_of_edps; i++) { 10543 if (dm->backlight_dev[i]) 10544 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 10545 } 10546 } 10547 } 10548 10549 static void dm_set_writeback(struct amdgpu_display_manager *dm, 10550 struct dm_crtc_state *crtc_state, 10551 struct drm_connector *connector, 10552 struct drm_connector_state *new_con_state) 10553 { 10554 struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector); 10555 struct amdgpu_device *adev = dm->adev; 10556 struct amdgpu_crtc *acrtc; 10557 struct dc_writeback_info *wb_info; 10558 struct pipe_ctx *pipe = NULL; 10559 struct amdgpu_framebuffer *afb; 10560 int i = 0; 10561 10562 wb_info = kzalloc_obj(*wb_info); 10563 if (!wb_info) { 10564 drm_err(adev_to_drm(adev), "Failed to allocate wb_info\n"); 10565 return; 10566 } 10567 10568 acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc); 10569 if (!acrtc) { 10570 drm_err(adev_to_drm(adev), "no amdgpu_crtc found\n"); 10571 kfree(wb_info); 10572 return; 10573 } 10574 10575 afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb); 10576 if (!afb) { 10577 drm_err(adev_to_drm(adev), "No amdgpu_framebuffer found\n"); 10578 kfree(wb_info); 10579 return; 10580 } 10581 10582 for (i = 0; i < MAX_PIPES; i++) { 10583 if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) { 10584 pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i]; 10585 break; 10586 } 10587 } 10588 10589 /* fill in wb_info */ 10590 wb_info->wb_enabled = true; 10591 10592 wb_info->dwb_pipe_inst = 0; 10593 wb_info->dwb_params.dwbscl_black_color = 0; 10594 wb_info->dwb_params.hdr_mult = 0x1F000; 10595 wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS; 10596 wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13; 10597 wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC; 10598 wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC; 10599 10600 /* width & height from crtc */ 10601 wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay; 10602 wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay; 10603 wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay; 10604 wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay; 10605 10606 wb_info->dwb_params.cnv_params.crop_en = false; 10607 wb_info->dwb_params.stereo_params.stereo_enabled = false; 10608 10609 wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits 10610 wb_info->dwb_params.cnv_params.out_min_pix_val = 0; 10611 wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB; 10612 wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS; 10613 10614 wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444; 10615 10616 wb_info->dwb_params.capture_rate = dwb_capture_rate_0; 10617 10618 wb_info->dwb_params.scaler_taps.h_taps = 1; 10619 wb_info->dwb_params.scaler_taps.v_taps = 1; 10620 wb_info->dwb_params.scaler_taps.h_taps_c = 1; 10621 wb_info->dwb_params.scaler_taps.v_taps_c = 1; 10622 wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING; 10623 10624 wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0]; 10625 wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1]; 10626 10627 for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) { 10628 
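/* Every MCIF buffer slot is pointed at the same framebuffer address;
 * chroma stays 0 because the output format programmed above is packed
 * 32bpp ARGB rather than planar YUV. */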
wb_info->mcif_buf_params.luma_address[i] = afb->address; 10629 wb_info->mcif_buf_params.chroma_address[i] = 0; 10630 } 10631 10632 wb_info->mcif_buf_params.p_vmid = 1; 10633 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) { 10634 wb_info->mcif_warmup_params.start_address.quad_part = afb->address; 10635 wb_info->mcif_warmup_params.region_size = 10636 wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height; 10637 } 10638 wb_info->mcif_warmup_params.p_vmid = 1; 10639 wb_info->writeback_source_plane = pipe->plane_state; 10640 10641 dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info); 10642 10643 acrtc->wb_pending = true; 10644 acrtc->wb_conn = wb_conn; 10645 drm_writeback_queue_job(wb_conn, new_con_state); 10646 } 10647 10648 static void amdgpu_dm_update_hdcp(struct drm_atomic_state *state) 10649 { 10650 struct drm_connector_state *old_con_state, *new_con_state; 10651 struct drm_device *dev = state->dev; 10652 struct drm_connector *connector; 10653 struct amdgpu_device *adev = drm_to_adev(dev); 10654 int i; 10655 10656 if (!adev->dm.hdcp_workqueue) 10657 return; 10658 10659 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 10660 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 10661 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 10662 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10663 struct dm_crtc_state *dm_new_crtc_state; 10664 struct amdgpu_dm_connector *aconnector; 10665 10666 if (!connector || connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 10667 continue; 10668 10669 aconnector = to_amdgpu_dm_connector(connector); 10670 10671 drm_dbg(dev, "[HDCP_DM] -------------- i : %x ----------\n", i); 10672 10673 drm_dbg(dev, "[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 10674 connector->index, connector->status, connector->dpms); 10675 drm_dbg(dev, "[HDCP_DM] state protection old: %x new: %x\n", 10676 old_con_state->content_protection, new_con_state->content_protection); 10677 10678 if (aconnector->dc_sink) { 10679 if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && 10680 aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { 10681 drm_dbg(dev, "[HDCP_DM] pipe_ctx dispname=%s\n", 10682 aconnector->dc_sink->edid_caps.display_name); 10683 } 10684 } 10685 10686 new_crtc_state = NULL; 10687 old_crtc_state = NULL; 10688 10689 if (acrtc) { 10690 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 10691 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 10692 } 10693 10694 if (old_crtc_state) 10695 drm_dbg(dev, "old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 10696 old_crtc_state->enable, 10697 old_crtc_state->active, 10698 old_crtc_state->mode_changed, 10699 old_crtc_state->active_changed, 10700 old_crtc_state->connectors_changed); 10701 10702 if (new_crtc_state) 10703 drm_dbg(dev, "NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 10704 new_crtc_state->enable, 10705 new_crtc_state->active, 10706 new_crtc_state->mode_changed, 10707 new_crtc_state->active_changed, 10708 new_crtc_state->connectors_changed); 10709 10710 10711 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10712 10713 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && 10714 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 10715 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 10716 new_con_state->content_protection = 
DRM_MODE_CONTENT_PROTECTION_DESIRED; 10717 dm_new_con_state->update_hdcp = true; 10718 continue; 10719 } 10720 10721 if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state, 10722 old_con_state, connector, adev->dm.hdcp_workqueue)) { 10723 /* When a display is unplugged from an MST hub, the connector will 10724 * be destroyed within dm_dp_mst_connector_destroy and its 10725 * hdcp properties, like type, undesired, desired, enabled, 10726 * will be lost. So, save the hdcp properties into hdcp_work within 10727 * amdgpu_dm_atomic_commit_tail; if the same display is 10728 * plugged back with the same display index, its hdcp properties 10729 * will be retrieved from hdcp_work within dm_dp_mst_get_modes 10730 */ 10731 10732 bool enable_encryption = false; 10733 10734 if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) 10735 enable_encryption = true; 10736 10737 if (aconnector->dc_link && aconnector->dc_sink && 10738 aconnector->dc_link->type == dc_connection_mst_branch) { 10739 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; 10740 struct hdcp_workqueue *hdcp_w = 10741 &hdcp_work[aconnector->dc_link->link_index]; 10742 10743 hdcp_w->hdcp_content_type[connector->index] = 10744 new_con_state->hdcp_content_type; 10745 hdcp_w->content_protection[connector->index] = 10746 new_con_state->content_protection; 10747 } 10748 10749 if (new_crtc_state && new_crtc_state->mode_changed && 10750 new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) 10751 enable_encryption = true; 10752 10753 drm_info(dev, "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); 10754 10755 if (aconnector->dc_link) 10756 hdcp_update_display( 10757 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, 10758 new_con_state->hdcp_content_type, enable_encryption); 10759 } 10760 } 10761 } 10762 10763 static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state) 10764 { 10765 struct drm_crtc *crtc; 10766 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10767 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 10768 int i, ret; 10769 10770 ret = drm_dp_mst_atomic_setup_commit(state); 10771 if (ret) 10772 return ret; 10773 10774 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10775 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10776 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10777 /* 10778 * Color management settings. We also update color properties 10779 * when a modeset is needed, to ensure it gets reprogrammed. 10780 */ 10781 if (dm_new_crtc_state->base.active && dm_new_crtc_state->stream && 10782 (dm_new_crtc_state->base.color_mgmt_changed || 10783 dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf || 10784 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 10785 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); 10786 if (ret) { 10787 drm_dbg_atomic(state->dev, "Failed to update color state\n"); 10788 return ret; 10789 } 10790 } 10791 } 10792 10793 return 0; 10794 } 10795 10796 /** 10797 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. 10798 * @state: The atomic state to commit 10799 * 10800 * This will tell DC to commit the constructed DC state from atomic_check, 10801 * programming the hardware. Any failure here implies a hardware failure, since 10802 * atomic check should have filtered out anything non-kosher.
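 *
 * Note that this hook returns void: at this point the commit can no longer
 * be rejected, so failures are only reported (WARN_ON/drm_err) while
 * programming continues.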
10803 */ 10804 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) 10805 { 10806 struct drm_device *dev = state->dev; 10807 struct amdgpu_device *adev = drm_to_adev(dev); 10808 struct amdgpu_display_manager *dm = &adev->dm; 10809 struct dm_atomic_state *dm_state; 10810 struct dc_state *dc_state = NULL; 10811 u32 i, j; 10812 struct drm_crtc *crtc; 10813 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10814 unsigned long flags; 10815 bool wait_for_vblank = true; 10816 struct drm_connector *connector; 10817 struct drm_connector_state *old_con_state = NULL, *new_con_state = NULL; 10818 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 10819 int crtc_disable_count = 0; 10820 10821 trace_amdgpu_dm_atomic_commit_tail_begin(state); 10822 10823 drm_atomic_helper_update_legacy_modeset_state(dev, state); 10824 drm_dp_mst_atomic_wait_for_dependencies(state); 10825 10826 dm_state = dm_atomic_get_new_state(state); 10827 if (dm_state && dm_state->context) { 10828 dc_state = dm_state->context; 10829 amdgpu_dm_commit_streams(state, dc_state); 10830 } 10831 10832 amdgpu_dm_update_hdcp(state); 10833 10834 /* Handle connector state changes */ 10835 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 10836 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 10837 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 10838 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 10839 struct dc_surface_update *dummy_updates; 10840 struct dc_stream_update stream_update; 10841 struct dc_info_packet hdr_packet; 10842 struct dc_stream_status *status = NULL; 10843 bool abm_changed, hdr_changed, scaling_changed, output_color_space_changed = false; 10844 10845 memset(&stream_update, 0, sizeof(stream_update)); 10846 10847 if (acrtc) { 10848 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 10849 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 10850 } 10851 10852 /* Skip any modesets/resets */ 10853 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) 10854 continue; 10855 10856 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10857 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10858 10859 scaling_changed = is_scaling_state_different(dm_new_con_state, 10860 dm_old_con_state); 10861 10862 if ((new_con_state->hdmi.broadcast_rgb != old_con_state->hdmi.broadcast_rgb) && 10863 (dm_old_crtc_state->stream->output_color_space != 10864 get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state))) 10865 output_color_space_changed = true; 10866 10867 abm_changed = dm_new_crtc_state->abm_level != 10868 dm_old_crtc_state->abm_level; 10869 10870 hdr_changed = 10871 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state); 10872 10873 if (!scaling_changed && !abm_changed && !hdr_changed && !output_color_space_changed) 10874 continue; 10875 10876 stream_update.stream = dm_new_crtc_state->stream; 10877 if (scaling_changed) { 10878 update_stream_scaling_settings(dev, &dm_new_con_state->base.crtc->mode, 10879 dm_new_con_state, dm_new_crtc_state->stream); 10880 10881 stream_update.src = dm_new_crtc_state->stream->src; 10882 stream_update.dst = dm_new_crtc_state->stream->dst; 10883 } 10884 10885 if (output_color_space_changed) { 10886 dm_new_crtc_state->stream->output_color_space 10887 = get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state); 10888 10889 
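/* stream_update was zeroed above; DC applies only the fields given
 * non-NULL pointers, so everything else is left as-is. */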
stream_update.output_color_space = &dm_new_crtc_state->stream->output_color_space; 10890 } 10891 10892 if (abm_changed) { 10893 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; 10894 10895 stream_update.abm_level = &dm_new_crtc_state->abm_level; 10896 } 10897 10898 if (hdr_changed) { 10899 fill_hdr_info_packet(new_con_state, &hdr_packet); 10900 stream_update.hdr_static_metadata = &hdr_packet; 10901 } 10902 10903 status = dc_stream_get_status(dm_new_crtc_state->stream); 10904 10905 if (WARN_ON(!status)) 10906 continue; 10907 10908 WARN_ON(!status->plane_count); 10909 10910 /* 10911 * TODO: DC refuses to perform stream updates without a dc_surface_update. 10912 * Here we create an empty update on each plane. 10913 * To fix this, DC should permit updating only stream properties. 10914 */ 10915 dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_KERNEL); 10916 if (!dummy_updates) { 10917 drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n"); 10918 continue; 10919 } 10920 for (j = 0; j < status->plane_count; j++) 10921 dummy_updates[j].surface = status->plane_states[j]; 10922 10923 sort(dummy_updates, status->plane_count, 10924 sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL); 10925 10926 mutex_lock(&dm->dc_lock); 10927 dc_exit_ips_for_hw_access(dm->dc); 10928 dc_update_planes_and_stream(dm->dc, 10929 dummy_updates, 10930 status->plane_count, 10931 dm_new_crtc_state->stream, 10932 &stream_update); 10933 mutex_unlock(&dm->dc_lock); 10934 kfree(dummy_updates); 10935 10936 drm_connector_update_privacy_screen(new_con_state); 10937 } 10938 10939 /** 10940 * Enable interrupts for CRTCs that are newly enabled or went through 10941 * a modeset. It was intentionally deferred until after the front end 10942 * state was modified to wait until the OTG was on and so the IRQ 10943 * handlers didn't access stale or invalid state. 10944 */ 10945 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10946 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 10947 #ifdef CONFIG_DEBUG_FS 10948 enum amdgpu_dm_pipe_crc_source cur_crc_src; 10949 #endif 10950 /* Count number of newly disabled CRTCs for dropping PM refs later. */ 10951 if (old_crtc_state->active && !new_crtc_state->active) 10952 crtc_disable_count++; 10953 10954 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10955 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10956 10957 /* For freesync config update on crtc state and params for irq */ 10958 update_stream_irq_parameters(dm, dm_new_crtc_state); 10959 10960 #ifdef CONFIG_DEBUG_FS 10961 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10962 cur_crc_src = acrtc->dm_irq_params.crc_src; 10963 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10964 #endif 10965 10966 if (new_crtc_state->active && 10967 (!old_crtc_state->active || 10968 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 10969 dc_stream_retain(dm_new_crtc_state->stream); 10970 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; 10971 manage_dm_interrupts(adev, acrtc, dm_new_crtc_state); 10972 } 10973 /* Handle vrr on->off / off->on transitions */ 10974 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); 10975 10976 #ifdef CONFIG_DEBUG_FS 10977 if (new_crtc_state->active && 10978 (!old_crtc_state->active || 10979 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 10980 /** 10981 * Frontend may have changed so reapply the CRC capture 10982 * settings for the stream. 
10983 */ 10984 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { 10985 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 10986 if (amdgpu_dm_crc_window_is_activated(crtc)) { 10987 uint8_t cnt; 10988 10989 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10990 for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) { 10991 if (acrtc->dm_irq_params.window_param[cnt].enable) { 10992 acrtc->dm_irq_params.window_param[cnt].update_win = true; 10993 10994 /** 10995 * It takes 2 frames for HW to stably generate CRC when 10996 * resuming from suspend, so we set skip_frame_cnt 2. 10997 */ 10998 acrtc->dm_irq_params.window_param[cnt].skip_frame_cnt = 2; 10999 } 11000 } 11001 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 11002 } 11003 #endif 11004 if (amdgpu_dm_crtc_configure_crc_source( 11005 crtc, dm_new_crtc_state, cur_crc_src)) 11006 drm_dbg_atomic(dev, "Failed to configure crc source"); 11007 } 11008 } 11009 #endif 11010 } 11011 11012 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) 11013 if (new_crtc_state->async_flip) 11014 wait_for_vblank = false; 11015 11016 /* update planes when needed per crtc*/ 11017 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { 11018 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11019 11020 if (dm_new_crtc_state->stream) 11021 amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); 11022 } 11023 11024 /* Enable writeback */ 11025 for_each_new_connector_in_state(state, connector, new_con_state, i) { 11026 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 11027 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 11028 11029 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 11030 continue; 11031 11032 if (!new_con_state->writeback_job) 11033 continue; 11034 11035 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 11036 11037 if (!new_crtc_state) 11038 continue; 11039 11040 if (acrtc->wb_enabled) 11041 continue; 11042 11043 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11044 11045 dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state); 11046 acrtc->wb_enabled = true; 11047 } 11048 11049 /* Update audio instances for each connector. */ 11050 amdgpu_dm_commit_audio(dev, state); 11051 11052 /* restore the backlight level */ 11053 for (i = 0; i < dm->num_of_edps; i++) { 11054 if (dm->backlight_dev[i] && 11055 (dm->actual_brightness[i] != dm->brightness[i])) 11056 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 11057 } 11058 11059 /* 11060 * send vblank event on all events not handled in flip and 11061 * mark consumed event for drm_atomic_helper_commit_hw_done 11062 */ 11063 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 11064 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11065 11066 if (new_crtc_state->event) 11067 drm_send_event_locked(dev, &new_crtc_state->event->base); 11068 11069 new_crtc_state->event = NULL; 11070 } 11071 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 11072 11073 /* Signal HW programming completion */ 11074 drm_atomic_helper_commit_hw_done(state); 11075 11076 if (wait_for_vblank) 11077 drm_atomic_helper_wait_for_flip_done(dev, state); 11078 11079 drm_atomic_helper_cleanup_planes(dev, state); 11080 11081 /* Don't free the memory if we are hitting this as part of suspend. 11082 * This way we don't free any memory during suspend; see 11083 * amdgpu_bo_free_kernel(). 
The memory will be freed in the first 11084 * non-suspend modeset or when the driver is torn down. 11085 */ 11086 if (!adev->in_suspend) { 11087 /* return the stolen vga memory back to VRAM */ 11088 if (!adev->mman.keep_stolen_vga_memory) 11089 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); 11090 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); 11091 } 11092 11093 /* 11094 * Finally, drop a runtime PM reference for each newly disabled CRTC, 11095 * so we can put the GPU into runtime suspend if we're not driving any 11096 * displays anymore 11097 */ 11098 for (i = 0; i < crtc_disable_count; i++) 11099 pm_runtime_put_autosuspend(dev->dev); 11100 pm_runtime_mark_last_busy(dev->dev); 11101 11102 trace_amdgpu_dm_atomic_commit_tail_finish(state); 11103 } 11104 11105 static int dm_force_atomic_commit(struct drm_connector *connector) 11106 { 11107 int ret = 0; 11108 struct drm_device *ddev = connector->dev; 11109 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); 11110 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 11111 struct drm_plane *plane = disconnected_acrtc->base.primary; 11112 struct drm_connector_state *conn_state; 11113 struct drm_crtc_state *crtc_state; 11114 struct drm_plane_state *plane_state; 11115 11116 if (!state) 11117 return -ENOMEM; 11118 11119 state->acquire_ctx = ddev->mode_config.acquire_ctx; 11120 11121 /* Construct an atomic state to restore previous display setting */ 11122 11123 /* 11124 * Attach connectors to drm_atomic_state 11125 */ 11126 conn_state = drm_atomic_get_connector_state(state, connector); 11127 11128 /* Check for error in getting connector state */ 11129 if (IS_ERR(conn_state)) { 11130 ret = PTR_ERR(conn_state); 11131 goto out; 11132 } 11133 11134 /* Attach crtc to drm_atomic_state*/ 11135 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); 11136 11137 /* Check for error in getting crtc state */ 11138 if (IS_ERR(crtc_state)) { 11139 ret = PTR_ERR(crtc_state); 11140 goto out; 11141 } 11142 11143 /* force a restore */ 11144 crtc_state->mode_changed = true; 11145 11146 /* Attach plane to drm_atomic_state */ 11147 plane_state = drm_atomic_get_plane_state(state, plane); 11148 11149 /* Check for error in getting plane state */ 11150 if (IS_ERR(plane_state)) { 11151 ret = PTR_ERR(plane_state); 11152 goto out; 11153 } 11154 11155 /* Call commit internally with the state we just constructed */ 11156 ret = drm_atomic_commit(state); 11157 11158 out: 11159 drm_atomic_state_put(state); 11160 if (ret) 11161 drm_err(ddev, "Restoring old state failed with %i\n", ret); 11162 11163 return ret; 11164 } 11165 11166 /* 11167 * This function handles all cases when set mode does not come upon hotplug. 
11168 * This includes when a display is unplugged then plugged back into the 11169 * same port and when running without usermode desktop manager support 11170 */ 11171 void dm_restore_drm_connector_state(struct drm_device *dev, 11172 struct drm_connector *connector) 11173 { 11174 struct amdgpu_dm_connector *aconnector; 11175 struct amdgpu_crtc *disconnected_acrtc; 11176 struct dm_crtc_state *acrtc_state; 11177 11178 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 11179 return; 11180 11181 aconnector = to_amdgpu_dm_connector(connector); 11182 11183 if (!aconnector->dc_sink || !connector->state || !connector->encoder) 11184 return; 11185 11186 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 11187 if (!disconnected_acrtc) 11188 return; 11189 11190 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state); 11191 if (!acrtc_state->stream) 11192 return; 11193 11194 /* 11195 * If the previous sink is not released and different from the current, 11196 * we deduce we are in a state where we cannot rely on a usermode call 11197 * to turn on the display, so we do it here 11198 */ 11199 if (acrtc_state->stream->sink != aconnector->dc_sink) 11200 dm_force_atomic_commit(&aconnector->base); 11201 } 11202 11203 /* 11204 * Grabs all modesetting locks to serialize against any blocking commits and 11205 * waits for completion of all non-blocking commits. 11206 */ 11207 static int do_aquire_global_lock(struct drm_device *dev, 11208 struct drm_atomic_state *state) 11209 { 11210 struct drm_crtc *crtc; 11211 struct drm_crtc_commit *commit; 11212 long ret; 11213 11214 /* 11215 * Adding all modeset locks to acquire_ctx ensures that 11216 * when the framework releases it, the 11217 * extra locks we are taking here will get released too 11218 */ 11219 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); 11220 if (ret) 11221 return ret; 11222 11223 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 11224 spin_lock(&crtc->commit_lock); 11225 commit = list_first_entry_or_null(&crtc->commit_list, 11226 struct drm_crtc_commit, commit_entry); 11227 if (commit) 11228 drm_crtc_commit_get(commit); 11229 spin_unlock(&crtc->commit_lock); 11230 11231 if (!commit) 11232 continue; 11233 11234 /* 11235 * Make sure all pending HW programming completed and 11236 * page flips done 11237 */ 11238 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ); 11239 11240 if (ret > 0) 11241 ret = wait_for_completion_interruptible_timeout( 11242 &commit->flip_done, 10*HZ); 11243 11244 if (ret == 0) 11245 drm_err(dev, "[CRTC:%d:%s] hw_done or flip_done timed out\n", 11246 crtc->base.id, crtc->name); 11247 11248 drm_crtc_commit_put(commit); 11249 } 11250 11251 return ret < 0 ?
ret : 0; 11252 } 11253 11254 static void get_freesync_config_for_crtc( 11255 struct dm_crtc_state *new_crtc_state, 11256 struct dm_connector_state *new_con_state) 11257 { 11258 struct mod_freesync_config config = {0}; 11259 struct amdgpu_dm_connector *aconnector; 11260 struct drm_display_mode *mode = &new_crtc_state->base.mode; 11261 int vrefresh = drm_mode_vrefresh(mode); 11262 bool fs_vid_mode = false; 11263 11264 if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 11265 return; 11266 11267 aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); 11268 11269 new_crtc_state->vrr_supported = new_con_state->freesync_capable && 11270 vrefresh >= aconnector->min_vfreq && 11271 vrefresh <= aconnector->max_vfreq; 11272 11273 if (new_crtc_state->vrr_supported) { 11274 new_crtc_state->stream->ignore_msa_timing_param = true; 11275 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; 11276 11277 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000; 11278 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; 11279 config.vsif_supported = true; 11280 config.btr = true; 11281 11282 if (fs_vid_mode) { 11283 config.state = VRR_STATE_ACTIVE_FIXED; 11284 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz; 11285 goto out; 11286 } else if (new_crtc_state->base.vrr_enabled) { 11287 config.state = VRR_STATE_ACTIVE_VARIABLE; 11288 } else { 11289 config.state = VRR_STATE_INACTIVE; 11290 } 11291 } else { 11292 config.state = VRR_STATE_UNSUPPORTED; 11293 } 11294 out: 11295 new_crtc_state->freesync_config = config; 11296 } 11297 11298 static void reset_freesync_config_for_crtc( 11299 struct dm_crtc_state *new_crtc_state) 11300 { 11301 new_crtc_state->vrr_supported = false; 11302 11303 memset(&new_crtc_state->vrr_infopacket, 0, 11304 sizeof(new_crtc_state->vrr_infopacket)); 11305 } 11306 11307 static bool 11308 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, 11309 struct drm_crtc_state *new_crtc_state) 11310 { 11311 const struct drm_display_mode *old_mode, *new_mode; 11312 11313 if (!old_crtc_state || !new_crtc_state) 11314 return false; 11315 11316 old_mode = &old_crtc_state->mode; 11317 new_mode = &new_crtc_state->mode; 11318 11319 if (old_mode->clock == new_mode->clock && 11320 old_mode->hdisplay == new_mode->hdisplay && 11321 old_mode->vdisplay == new_mode->vdisplay && 11322 old_mode->htotal == new_mode->htotal && 11323 old_mode->vtotal != new_mode->vtotal && 11324 old_mode->hsync_start == new_mode->hsync_start && 11325 old_mode->vsync_start != new_mode->vsync_start && 11326 old_mode->hsync_end == new_mode->hsync_end && 11327 old_mode->vsync_end != new_mode->vsync_end && 11328 old_mode->hskew == new_mode->hskew && 11329 old_mode->vscan == new_mode->vscan && 11330 (old_mode->vsync_end - old_mode->vsync_start) == 11331 (new_mode->vsync_end - new_mode->vsync_start)) 11332 return true; 11333 11334 return false; 11335 } 11336 11337 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) 11338 { 11339 u64 num, den, res; 11340 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; 11341 11342 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; 11343 11344 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000; 11345 den = (unsigned long long)new_crtc_state->mode.htotal * 11346 (unsigned long long)new_crtc_state->mode.vtotal; 11347 11348 res = div_u64(num, den); 11349 
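/* Worked example with illustrative numbers: a 1920x1080@60 mode with a
 * 148500 kHz pixel clock, htotal 2200 and vtotal 1125 gives
 * num = 148500 * 1000 * 1000000 and den = 2200 * 1125 = 2475000,
 * so res = 60000000 uHz, i.e. a 60 Hz fixed refresh rate. */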
dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res; 11350 } 11351 11352 static int dm_update_crtc_state(struct amdgpu_display_manager *dm, 11353 struct drm_atomic_state *state, 11354 struct drm_crtc *crtc, 11355 struct drm_crtc_state *old_crtc_state, 11356 struct drm_crtc_state *new_crtc_state, 11357 bool enable, 11358 bool *lock_and_validation_needed) 11359 { 11360 struct dm_atomic_state *dm_state = NULL; 11361 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 11362 struct dc_stream_state *new_stream; 11363 struct amdgpu_device *adev = dm->adev; 11364 int ret = 0; 11365 11366 /* 11367 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set 11368 * update changed items 11369 */ 11370 struct amdgpu_crtc *acrtc = NULL; 11371 struct drm_connector *connector = NULL; 11372 struct amdgpu_dm_connector *aconnector = NULL; 11373 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; 11374 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; 11375 11376 new_stream = NULL; 11377 11378 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 11379 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11380 acrtc = to_amdgpu_crtc(crtc); 11381 connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 11382 if (connector) 11383 aconnector = to_amdgpu_dm_connector(connector); 11384 11385 /* TODO This hack should go away */ 11386 if (connector && enable) { 11387 /* Make sure fake sink is created in plug-in scenario */ 11388 drm_new_conn_state = drm_atomic_get_new_connector_state(state, 11389 connector); 11390 drm_old_conn_state = drm_atomic_get_old_connector_state(state, 11391 connector); 11392 11393 if (WARN_ON(!drm_new_conn_state)) { 11394 ret = -EINVAL; 11395 goto fail; 11396 } 11397 11398 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); 11399 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); 11400 11401 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 11402 goto skip_modeset; 11403 11404 new_stream = create_validate_stream_for_sink(connector, 11405 &new_crtc_state->mode, 11406 dm_new_conn_state, 11407 dm_old_crtc_state->stream); 11408 11409 /* 11410 * we can have no stream on ACTION_SET if a display 11411 * was disconnected during S3, in this case it is not an 11412 * error, the OS will be updated after detection, and 11413 * will do the right thing on next atomic commit 11414 */ 11415 11416 if (!new_stream) { 11417 drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new stream for crtc %d\n", 11418 __func__, acrtc->base.base.id); 11419 ret = -ENOMEM; 11420 goto fail; 11421 } 11422 11423 /* 11424 * TODO: Check VSDB bits to decide whether this should 11425 * be enabled or not. 11426 */ 11427 new_stream->triggered_crtc_reset.enabled = 11428 dm->force_timing_sync; 11429 11430 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 11431 11432 ret = fill_hdr_info_packet(drm_new_conn_state, 11433 &new_stream->hdr_static_metadata); 11434 if (ret) 11435 goto fail; 11436 11437 /* 11438 * If we already removed the old stream from the context 11439 * (and set the new stream to NULL) then we can't reuse 11440 * the old stream even if the stream and scaling are unchanged. 11441 * We'll hit the BUG_ON and black screen. 11442 * 11443 * TODO: Refactor this function to allow this check to work 11444 * in all conditions. 
11445 */ 11446 if (amdgpu_freesync_vid_mode && 11447 dm_new_crtc_state->stream && 11448 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) 11449 goto skip_modeset; 11450 11451 if (dm_new_crtc_state->stream && 11452 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 11453 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 11454 new_crtc_state->mode_changed = false; 11455 drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d", 11456 new_crtc_state->mode_changed); 11457 } 11458 } 11459 11460 /* mode_changed flag may get updated above, need to check again */ 11461 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 11462 goto skip_modeset; 11463 11464 drm_dbg_state(state->dev, 11465 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 11466 acrtc->crtc_id, 11467 new_crtc_state->enable, 11468 new_crtc_state->active, 11469 new_crtc_state->planes_changed, 11470 new_crtc_state->mode_changed, 11471 new_crtc_state->active_changed, 11472 new_crtc_state->connectors_changed); 11473 11474 /* Remove stream for any changed/disabled CRTC */ 11475 if (!enable) { 11476 11477 if (!dm_old_crtc_state->stream) 11478 goto skip_modeset; 11479 11480 /* Unset freesync video if it was active before */ 11481 if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { 11482 dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; 11483 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; 11484 } 11485 11486 /* Now check if we should set freesync video mode */ 11487 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && 11488 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 11489 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && 11490 is_timing_unchanged_for_freesync(new_crtc_state, 11491 old_crtc_state)) { 11492 new_crtc_state->mode_changed = false; 11493 drm_dbg_driver(adev_to_drm(adev), 11494 "Mode change not required for front porch change, setting mode_changed to %d", 11495 new_crtc_state->mode_changed); 11496 11497 set_freesync_fixed_config(dm_new_crtc_state); 11498 11499 goto skip_modeset; 11500 } else if (amdgpu_freesync_vid_mode && aconnector && 11501 is_freesync_video_mode(&new_crtc_state->mode, 11502 aconnector)) { 11503 struct drm_display_mode *high_mode; 11504 11505 high_mode = get_highest_refresh_rate_mode(aconnector, false); 11506 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) 11507 set_freesync_fixed_config(dm_new_crtc_state); 11508 } 11509 11510 ret = dm_atomic_get_state(state, &dm_state); 11511 if (ret) 11512 goto fail; 11513 11514 drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n", 11515 crtc->base.id); 11516 11517 /* i.e. 
reset mode */ 11518 if (dc_state_remove_stream( 11519 dm->dc, 11520 dm_state->context, 11521 dm_old_crtc_state->stream) != DC_OK) { 11522 ret = -EINVAL; 11523 goto fail; 11524 } 11525 11526 dc_stream_release(dm_old_crtc_state->stream); 11527 dm_new_crtc_state->stream = NULL; 11528 11529 reset_freesync_config_for_crtc(dm_new_crtc_state); 11530 11531 *lock_and_validation_needed = true; 11532 11533 } else {/* Add stream for any updated/enabled CRTC */ 11534 /* 11535 * Quick fix to prevent NULL pointer on new_stream when 11536 * added MST connectors not found in existing crtc_state in the chained mode 11537 * TODO: need to dig out the root cause of that 11538 */ 11539 if (!connector) 11540 goto skip_modeset; 11541 11542 if (modereset_required(new_crtc_state)) 11543 goto skip_modeset; 11544 11545 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream, 11546 dm_old_crtc_state->stream)) { 11547 11548 WARN_ON(dm_new_crtc_state->stream); 11549 11550 ret = dm_atomic_get_state(state, &dm_state); 11551 if (ret) 11552 goto fail; 11553 11554 dm_new_crtc_state->stream = new_stream; 11555 11556 dc_stream_retain(new_stream); 11557 11558 drm_dbg_atomic(adev_to_drm(adev), "Enabling DRM crtc: %d\n", 11559 crtc->base.id); 11560 11561 if (dc_state_add_stream( 11562 dm->dc, 11563 dm_state->context, 11564 dm_new_crtc_state->stream) != DC_OK) { 11565 ret = -EINVAL; 11566 goto fail; 11567 } 11568 11569 *lock_and_validation_needed = true; 11570 } 11571 } 11572 11573 skip_modeset: 11574 /* Release extra reference */ 11575 if (new_stream) 11576 dc_stream_release(new_stream); 11577 11578 /* 11579 * We want to do dc stream updates that do not require a 11580 * full modeset below. 11581 */ 11582 if (!(enable && connector && new_crtc_state->active)) 11583 return 0; 11584 /* 11585 * Given above conditions, the dc state cannot be NULL because: 11586 * 1. We're in the process of enabling CRTCs (just been added 11587 * to the dc context, or already is on the context) 11588 * 2. Has a valid connector attached, and 11589 * 3. Is currently active and enabled. 11590 * => The dc stream state currently exists. 11591 */ 11592 BUG_ON(dm_new_crtc_state->stream == NULL); 11593 11594 /* Scaling or underscan settings */ 11595 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) || 11596 drm_atomic_crtc_needs_modeset(new_crtc_state)) 11597 update_stream_scaling_settings(adev_to_drm(adev), 11598 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream); 11599 11600 /* ABM settings */ 11601 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 11602 11603 /* 11604 * Color management settings. We also update color properties 11605 * when a modeset is needed, to ensure it gets reprogrammed. 11606 */ 11607 if (dm_new_crtc_state->base.color_mgmt_changed || 11608 dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf || 11609 drm_atomic_crtc_needs_modeset(new_crtc_state)) { 11610 ret = amdgpu_dm_check_crtc_color_mgmt(dm_new_crtc_state, true); 11611 if (ret) 11612 goto fail; 11613 } 11614 11615 /* Update Freesync settings. 
*/ 11616 get_freesync_config_for_crtc(dm_new_crtc_state, 11617 dm_new_conn_state); 11618 11619 return ret; 11620 11621 fail: 11622 if (new_stream) 11623 dc_stream_release(new_stream); 11624 return ret; 11625 } 11626 11627 static bool should_reset_plane(struct drm_atomic_state *state, 11628 struct drm_plane *plane, 11629 struct drm_plane_state *old_plane_state, 11630 struct drm_plane_state *new_plane_state) 11631 { 11632 struct drm_plane *other; 11633 struct drm_plane_state *old_other_state, *new_other_state; 11634 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 11635 struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state; 11636 struct amdgpu_device *adev = drm_to_adev(plane->dev); 11637 struct drm_connector_state *new_con_state; 11638 struct drm_connector *connector; 11639 int i; 11640 11641 /* 11642 * TODO: Remove this hack for all asics once it is proven that fast 11643 * updates work fine on DCN3.2+. 11644 */ 11645 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) && 11646 state->allow_modeset) 11647 return true; 11648 11649 /* Check for writeback commit */ 11650 for_each_new_connector_in_state(state, connector, new_con_state, i) { 11651 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 11652 continue; 11653 11654 if (new_con_state->writeback_job) 11655 return true; 11656 } 11657 11658 if (amdgpu_in_reset(adev) && state->allow_modeset) 11659 return true; 11660 11661 /* Exit early if we know that we're adding or removing the plane. */ 11662 if (old_plane_state->crtc != new_plane_state->crtc) 11663 return true; 11664 11665 /* old crtc == new_crtc == NULL, plane not in context. */ 11666 if (!new_plane_state->crtc) 11667 return false; 11668 11669 new_crtc_state = 11670 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); 11671 old_crtc_state = 11672 drm_atomic_get_old_crtc_state(state, old_plane_state->crtc); 11673 11674 if (!new_crtc_state) 11675 return true; 11676 11677 /* 11678 * A change in cursor mode means a new dc pipe needs to be acquired or 11679 * released from the state 11680 */ 11681 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); 11682 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 11683 if (plane->type == DRM_PLANE_TYPE_CURSOR && 11684 old_dm_crtc_state != NULL && 11685 old_dm_crtc_state->cursor_mode != new_dm_crtc_state->cursor_mode) { 11686 return true; 11687 } 11688 11689 /* CRTC Degamma changes currently require us to recreate planes. */ 11690 if (new_crtc_state->color_mgmt_changed) 11691 return true; 11692 11693 /* 11694 * On zpos change, planes need to be reordered by removing and re-adding 11695 * them one by one to the dc state, in order of descending zpos. 11696 * 11697 * TODO: We can likely skip bandwidth validation if the only thing that 11698 * changed about the plane was its z-ordering. 11699 */ 11700 if (old_plane_state->normalized_zpos != new_plane_state->normalized_zpos) 11701 return true; 11702 11703 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) 11704 return true; 11705 11706 /* 11707 * If there are any new primary or overlay planes being added or 11708 * removed then the z-order can potentially change. To ensure 11709 * correct z-order and pipe acquisition the current DC architecture 11710 * requires us to remove and recreate all existing planes. 11711 * 11712 * TODO: Come up with a more elegant solution for this.
11713 */ 11714 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { 11715 struct amdgpu_framebuffer *old_afb, *new_afb; 11716 struct dm_plane_state *dm_new_other_state, *dm_old_other_state; 11717 11718 dm_new_other_state = to_dm_plane_state(new_other_state); 11719 dm_old_other_state = to_dm_plane_state(old_other_state); 11720 11721 if (other->type == DRM_PLANE_TYPE_CURSOR) 11722 continue; 11723 11724 if (old_other_state->crtc != new_plane_state->crtc && 11725 new_other_state->crtc != new_plane_state->crtc) 11726 continue; 11727 11728 if (old_other_state->crtc != new_other_state->crtc) 11729 return true; 11730 11731 /* Src/dst size and scaling updates. */ 11732 if (old_other_state->src_w != new_other_state->src_w || 11733 old_other_state->src_h != new_other_state->src_h || 11734 old_other_state->crtc_w != new_other_state->crtc_w || 11735 old_other_state->crtc_h != new_other_state->crtc_h) 11736 return true; 11737 11738 /* Rotation / mirroring updates. */ 11739 if (old_other_state->rotation != new_other_state->rotation) 11740 return true; 11741 11742 /* Blending updates. */ 11743 if (old_other_state->pixel_blend_mode != 11744 new_other_state->pixel_blend_mode) 11745 return true; 11746 11747 /* Alpha updates. */ 11748 if (old_other_state->alpha != new_other_state->alpha) 11749 return true; 11750 11751 /* Colorspace changes. */ 11752 if (old_other_state->color_range != new_other_state->color_range || 11753 old_other_state->color_encoding != new_other_state->color_encoding) 11754 return true; 11755 11756 /* HDR/Transfer Function changes. */ 11757 if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf || 11758 dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut || 11759 dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult || 11760 dm_old_other_state->ctm != dm_new_other_state->ctm || 11761 dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut || 11762 dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf || 11763 dm_old_other_state->lut3d != dm_new_other_state->lut3d || 11764 dm_old_other_state->blend_lut != dm_new_other_state->blend_lut || 11765 dm_old_other_state->blend_tf != dm_new_other_state->blend_tf) 11766 return true; 11767 11768 /* Framebuffer checks fall at the end. */ 11769 if (!old_other_state->fb || !new_other_state->fb) 11770 continue; 11771 11772 /* Pixel format changes can require bandwidth updates. */ 11773 if (old_other_state->fb->format != new_other_state->fb->format) 11774 return true; 11775 11776 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb; 11777 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb; 11778 11779 /* Tiling and DCC changes also require bandwidth updates. 
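 * (DCC on/off or a different swizzle pattern changes the memory traffic
 * the surface generates, so bandwidth has to be revalidated.)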
*/ 11780 if (old_afb->tiling_flags != new_afb->tiling_flags || 11781 old_afb->base.modifier != new_afb->base.modifier) 11782 return true; 11783 } 11784 11785 return false; 11786 } 11787 11788 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, 11789 struct drm_plane_state *new_plane_state, 11790 struct drm_framebuffer *fb) 11791 { 11792 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev); 11793 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); 11794 unsigned int pitch; 11795 bool linear; 11796 11797 if (fb->width > new_acrtc->max_cursor_width || 11798 fb->height > new_acrtc->max_cursor_height) { 11799 drm_dbg_atomic(adev_to_drm(adev), "Bad cursor FB size %dx%d\n", 11800 new_plane_state->fb->width, 11801 new_plane_state->fb->height); 11802 return -EINVAL; 11803 } 11804 if (new_plane_state->src_w != fb->width << 16 || 11805 new_plane_state->src_h != fb->height << 16) { 11806 drm_dbg_atomic(adev_to_drm(adev), "Cropping not supported for cursor plane\n"); 11807 return -EINVAL; 11808 } 11809 11810 /* Pitch in pixels */ 11811 pitch = fb->pitches[0] / fb->format->cpp[0]; 11812 11813 if (fb->width != pitch) { 11814 drm_dbg_atomic(adev_to_drm(adev), "Cursor FB width %d doesn't match pitch %d", 11815 fb->width, pitch); 11816 return -EINVAL; 11817 } 11818 11819 switch (pitch) { 11820 case 64: 11821 case 128: 11822 case 256: 11823 /* FB pitch is supported by cursor plane */ 11824 break; 11825 default: 11826 drm_dbg_atomic(adev_to_drm(adev), "Bad cursor FB pitch %d px\n", pitch); 11827 return -EINVAL; 11828 } 11829 11830 /* Core DRM takes care of checking FB modifiers, so we only need to 11831 * check tiling flags when the FB doesn't have a modifier. 11832 */ 11833 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { 11834 if (adev->family == AMDGPU_FAMILY_GC_12_0_0) { 11835 linear = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE) == 0; 11836 } else if (adev->family >= AMDGPU_FAMILY_AI) { 11837 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; 11838 } else { 11839 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && 11840 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && 11841 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; 11842 } 11843 if (!linear) { 11844 drm_dbg_atomic(adev_to_drm(adev), "Cursor FB not linear"); 11845 return -EINVAL; 11846 } 11847 } 11848 11849 return 0; 11850 } 11851 11852 /* 11853 * Helper function for checking the cursor in native mode 11854 */ 11855 static int dm_check_native_cursor_state(struct drm_crtc *new_plane_crtc, 11856 struct drm_plane *plane, 11857 struct drm_plane_state *new_plane_state, 11858 bool enable) 11859 { 11860 11861 struct amdgpu_crtc *new_acrtc; 11862 int ret; 11863 11864 if (!enable || !new_plane_crtc || 11865 drm_atomic_plane_disabling(plane->state, new_plane_state)) 11866 return 0; 11867 11868 new_acrtc = to_amdgpu_crtc(new_plane_crtc); 11869 11870 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { 11871 drm_dbg_atomic(new_plane_crtc->dev, "Cropping not supported for cursor plane\n"); 11872 return -EINVAL; 11873 } 11874 11875 if (new_plane_state->fb) { 11876 ret = dm_check_cursor_fb(new_acrtc, new_plane_state, 11877 new_plane_state->fb); 11878 if (ret) 11879 return ret; 11880 } 11881 11882 return 0; 11883 } 11884 11885 static bool dm_should_update_native_cursor(struct drm_atomic_state *state, 11886 struct drm_crtc *old_plane_crtc, 11887 struct drm_crtc *new_plane_crtc, 11888 bool enable) 11889 { 11890 struct 
drm_crtc_state *old_crtc_state, *new_crtc_state; 11891 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 11892 11893 if (!enable) { 11894 if (old_plane_crtc == NULL) 11895 return true; 11896 11897 old_crtc_state = drm_atomic_get_old_crtc_state( 11898 state, old_plane_crtc); 11899 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 11900 11901 return dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; 11902 } else { 11903 if (new_plane_crtc == NULL) 11904 return true; 11905 11906 new_crtc_state = drm_atomic_get_new_crtc_state( 11907 state, new_plane_crtc); 11908 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11909 11910 return dm_new_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; 11911 } 11912 } 11913 11914 static int dm_update_plane_state(struct dc *dc, 11915 struct drm_atomic_state *state, 11916 struct drm_plane *plane, 11917 struct drm_plane_state *old_plane_state, 11918 struct drm_plane_state *new_plane_state, 11919 bool enable, 11920 bool *lock_and_validation_needed, 11921 bool *is_top_most_overlay) 11922 { 11923 11924 struct dm_atomic_state *dm_state = NULL; 11925 struct drm_crtc *new_plane_crtc, *old_plane_crtc; 11926 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 11927 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; 11928 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; 11929 bool needs_reset, update_native_cursor; 11930 int ret = 0; 11931 11932 11933 new_plane_crtc = new_plane_state->crtc; 11934 old_plane_crtc = old_plane_state->crtc; 11935 dm_new_plane_state = to_dm_plane_state(new_plane_state); 11936 dm_old_plane_state = to_dm_plane_state(old_plane_state); 11937 11938 update_native_cursor = dm_should_update_native_cursor(state, 11939 old_plane_crtc, 11940 new_plane_crtc, 11941 enable); 11942 11943 if (plane->type == DRM_PLANE_TYPE_CURSOR && update_native_cursor) { 11944 ret = dm_check_native_cursor_state(new_plane_crtc, plane, 11945 new_plane_state, enable); 11946 if (ret) 11947 return ret; 11948 11949 return 0; 11950 } 11951 11952 needs_reset = should_reset_plane(state, plane, old_plane_state, 11953 new_plane_state); 11954 11955 /* Remove any changed/removed planes */ 11956 if (!enable) { 11957 if (!needs_reset) 11958 return 0; 11959 11960 if (!old_plane_crtc) 11961 return 0; 11962 11963 old_crtc_state = drm_atomic_get_old_crtc_state( 11964 state, old_plane_crtc); 11965 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 11966 11967 if (!dm_old_crtc_state->stream) 11968 return 0; 11969 11970 drm_dbg_atomic(old_plane_crtc->dev, "Disabling DRM plane: %d on DRM crtc %d\n", 11971 plane->base.id, old_plane_crtc->base.id); 11972 11973 ret = dm_atomic_get_state(state, &dm_state); 11974 if (ret) 11975 return ret; 11976 11977 if (!dc_state_remove_plane( 11978 dc, 11979 dm_old_crtc_state->stream, 11980 dm_old_plane_state->dc_state, 11981 dm_state->context)) { 11982 11983 return -EINVAL; 11984 } 11985 11986 if (dm_old_plane_state->dc_state) 11987 dc_plane_state_release(dm_old_plane_state->dc_state); 11988 11989 dm_new_plane_state->dc_state = NULL; 11990 11991 *lock_and_validation_needed = true; 11992 11993 } else { /* Add new planes */ 11994 struct dc_plane_state *dc_new_plane_state; 11995 11996 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 11997 return 0; 11998 11999 if (!new_plane_crtc) 12000 return 0; 12001 12002 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); 12003 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 12004 12005 if (!dm_new_crtc_state->stream) 12006 return 0; 
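/* Plane updates that don't require a reset are handled as fast updates
 * at commit time; nothing to change in the DC state here. */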
12007 12008 if (!needs_reset) 12009 return 0; 12010 12011 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); 12012 if (ret) 12013 goto out; 12014 12015 WARN_ON(dm_new_plane_state->dc_state); 12016 12017 dc_new_plane_state = dc_create_plane_state(dc); 12018 if (!dc_new_plane_state) { 12019 ret = -ENOMEM; 12020 goto out; 12021 } 12022 12023 drm_dbg_atomic(new_plane_crtc->dev, "Enabling DRM plane: %d on DRM crtc %d\n", 12024 plane->base.id, new_plane_crtc->base.id); 12025 12026 ret = fill_dc_plane_attributes( 12027 drm_to_adev(new_plane_crtc->dev), 12028 dc_new_plane_state, 12029 new_plane_state, 12030 new_crtc_state); 12031 if (ret) { 12032 dc_plane_state_release(dc_new_plane_state); 12033 goto out; 12034 } 12035 12036 ret = dm_atomic_get_state(state, &dm_state); 12037 if (ret) { 12038 dc_plane_state_release(dc_new_plane_state); 12039 goto out; 12040 } 12041 12042 /* 12043 * Any atomic check errors that occur after this will 12044 * not need a release. The plane state will be attached 12045 * to the stream, and therefore part of the atomic 12046 * state. It'll be released when the atomic state is 12047 * cleaned. 12048 */ 12049 if (!dc_state_add_plane( 12050 dc, 12051 dm_new_crtc_state->stream, 12052 dc_new_plane_state, 12053 dm_state->context)) { 12054 12055 dc_plane_state_release(dc_new_plane_state); 12056 ret = -EINVAL; 12057 goto out; 12058 } 12059 12060 dm_new_plane_state->dc_state = dc_new_plane_state; 12061 12062 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY); 12063 12064 /* Tell DC to do a full surface update every time there 12065 * is a plane change. Inefficient, but works for now. 12066 */ 12067 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; 12068 12069 *lock_and_validation_needed = true; 12070 } 12071 12072 out: 12073 /* If enabling cursor overlay failed, attempt fallback to native mode */ 12074 if (enable && ret == -EINVAL && plane->type == DRM_PLANE_TYPE_CURSOR) { 12075 ret = dm_check_native_cursor_state(new_plane_crtc, plane, 12076 new_plane_state, enable); 12077 if (ret) 12078 return ret; 12079 12080 dm_new_crtc_state->cursor_mode = DM_CURSOR_NATIVE_MODE; 12081 } 12082 12083 return ret; 12084 } 12085 12086 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, 12087 int *src_w, int *src_h) 12088 { 12089 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 12090 case DRM_MODE_ROTATE_90: 12091 case DRM_MODE_ROTATE_270: 12092 *src_w = plane_state->src_h >> 16; 12093 *src_h = plane_state->src_w >> 16; 12094 break; 12095 case DRM_MODE_ROTATE_0: 12096 case DRM_MODE_ROTATE_180: 12097 default: 12098 *src_w = plane_state->src_w >> 16; 12099 *src_h = plane_state->src_h >> 16; 12100 break; 12101 } 12102 } 12103 12104 static void 12105 dm_get_plane_scale(struct drm_plane_state *plane_state, 12106 int *out_plane_scale_w, int *out_plane_scale_h) 12107 { 12108 int plane_src_w, plane_src_h; 12109 12110 dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h); 12111 *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0; 12112 *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0; 12113 } 12114 12115 /* 12116 * The normalized_zpos value cannot be used by this iterator directly. It's only 12117 * calculated for enabled planes, potentially causing normalized_zpos collisions 12118 * between enabled/disabled planes in the atomic state. 
We need a unique value
 * so that the iterator will not generate the same object twice, or loop
 * indefinitely.
 */
static inline struct __drm_planes_state *__get_next_zpos(
	struct drm_atomic_state *state,
	struct __drm_planes_state *prev)
{
	unsigned int highest_zpos = 0, prev_zpos = 256;
	uint32_t highest_id = 0, prev_id = UINT_MAX;
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	int i, highest_i = -1;

	if (prev != NULL) {
		prev_zpos = prev->new_state->zpos;
		prev_id = prev->ptr->base.id;
	}

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		/* Skip planes with higher zpos than the previously returned */
		if (new_plane_state->zpos > prev_zpos ||
		    (new_plane_state->zpos == prev_zpos &&
		     plane->base.id >= prev_id))
			continue;

		/* Save the index of the plane with highest zpos */
		if (new_plane_state->zpos > highest_zpos ||
		    (new_plane_state->zpos == highest_zpos &&
		     plane->base.id > highest_id)) {
			highest_zpos = new_plane_state->zpos;
			highest_id = plane->base.id;
			highest_i = i;
		}
	}

	if (highest_i < 0)
		return NULL;

	return &state->planes[highest_i];
}

/*
 * Use the uniqueness of the plane's (zpos, drm obj ID) combination to iterate
 * by descending zpos, as read from the new plane state. This is the same
 * ordering as defined by drm_atomic_normalize_zpos().
 */
#define for_each_oldnew_plane_in_descending_zpos(__state, plane, old_plane_state, new_plane_state) \
	for (struct __drm_planes_state *__i = __get_next_zpos((__state), NULL); \
	     __i != NULL; __i = __get_next_zpos((__state), __i)) \
		for_each_if(((plane) = __i->ptr, \
			     (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
			     (old_plane_state) = __i->old_state, \
			     (new_plane_state) = __i->new_state, 1))

static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state, *old_conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
		if (!conn_state->crtc)
			conn_state = old_conn_state;

		if (conn_state->crtc != crtc)
			continue;

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->mst_output_port || !aconnector->mst_root)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
}

/**
 * DOC: Cursor Modes - Native vs Overlay
 *
 * In native mode, the cursor uses an integrated cursor pipe within each DCN hw
 * plane. It does not require a dedicated hw plane to enable, but it is
 * subject to the same z-order and scaling as the hw plane. It also has format
 * restrictions: an RGB cursor in native mode cannot be enabled within a
 * non-RGB hw plane.
 *
 * In overlay mode, the cursor uses a separate DCN hw plane, and thus has its
 * own scaling and z-pos. It also has no blending restrictions. This yields
 * cursor behavior more akin to a DRM client's expectations. However, it does
 * occupy an extra DCN plane, and therefore will only be used if a DCN plane is
 * available.
 */

/**
 * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc
 * @adev: amdgpu device
 * @state: DRM atomic state
 * @dm_crtc_state: amdgpu state for the CRTC containing the cursor
 * @cursor_mode: Returns the required cursor mode on dm_crtc_state
 *
 * Get whether the cursor should be enabled in native mode, or overlay mode, on
 * the dm_crtc_state.
 *
 * The cursor should be enabled in overlay mode if there exists an underlying
 * plane - on which the cursor may be blended - that is either YUV formatted, or
 * scaled differently from the cursor.
 *
 * Since zpos info is required, drm_atomic_normalize_zpos must be called before
 * calling this function.
 *
 * Return: 0 on success, or an error code if getting the cursor plane state
 * failed.
 */
static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
				   struct drm_atomic_state *state,
				   struct dm_crtc_state *dm_crtc_state,
				   enum amdgpu_dm_cursor_mode *cursor_mode)
{
	struct drm_plane_state *old_plane_state, *plane_state, *cursor_state;
	struct drm_crtc_state *crtc_state = &dm_crtc_state->base;
	struct drm_plane *plane;
	bool consider_mode_change = false;
	bool entire_crtc_covered = false;
	bool cursor_changed = false;
	int underlying_scale_w, underlying_scale_h;
	int cursor_scale_w, cursor_scale_h;
	int i;

	/* Overlay cursor is not supported on HW before DCN.
	 * DCN401/DCN420 do not have the cursor-on-scaled-plane or
	 * cursor-on-yuv-plane restrictions of previous DCN generations, so
	 * native mode is always used there.
	 */
	if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1) ||
	    amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 2, 0)) {
		*cursor_mode = DM_CURSOR_NATIVE_MODE;
		return 0;
	}

	/* Init cursor_mode to be the same as current */
	*cursor_mode = dm_crtc_state->cursor_mode;

	/*
	 * Cursor mode can change if a plane's format changes, scale changes, is
	 * enabled/disabled, or z-order changes.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) {
		int new_scale_w, new_scale_h, old_scale_w, old_scale_h;

		/* Only care about planes on this CRTC */
		if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0)
			continue;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_changed = true;

		if (drm_atomic_plane_enabling(old_plane_state, plane_state) ||
		    drm_atomic_plane_disabling(old_plane_state, plane_state) ||
		    old_plane_state->fb->format != plane_state->fb->format) {
			consider_mode_change = true;
			break;
		}

		dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h);
		dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
		if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
			consider_mode_change = true;
			break;
		}
	}

	if (!consider_mode_change && !crtc_state->zpos_changed)
		return 0;

	/*
	 * If there is no cursor change on this CRTC, and the cursor is not
	 * enabled on this CRTC, then there is no need to set the cursor mode.
	 * This avoids needlessly locking the cursor state.
	 */
	if (!cursor_changed &&
	    !(drm_plane_mask(crtc_state->crtc->cursor) & crtc_state->plane_mask)) {
		return 0;
	}

	cursor_state = drm_atomic_get_plane_state(state,
						  crtc_state->crtc->cursor);
	if (IS_ERR(cursor_state))
		return PTR_ERR(cursor_state);

	/* Cursor is disabled */
	if (!cursor_state->fb)
		return 0;

	/* For all planes in descending z-order (all of which are below the
	 * cursor as per zpos definitions), check their scaling and format
	 */
	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, plane_state) {

		/* Only care about non-cursor planes on this CRTC */
		if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0 ||
		    plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* Underlying plane is YUV format - use overlay cursor */
		if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) {
			*cursor_mode = DM_CURSOR_OVERLAY_MODE;
			return 0;
		}

		dm_get_plane_scale(plane_state,
				   &underlying_scale_w, &underlying_scale_h);
		dm_get_plane_scale(cursor_state,
				   &cursor_scale_w, &cursor_scale_h);

		/* Underlying plane has different scale - use overlay cursor */
		if (cursor_scale_w != underlying_scale_w &&
		    cursor_scale_h != underlying_scale_h) {
			*cursor_mode = DM_CURSOR_OVERLAY_MODE;
			return 0;
		}

		/* If this plane covers the whole CRTC, no need to check planes underneath */
		if (plane_state->crtc_x <= 0 && plane_state->crtc_y <= 0 &&
		    plane_state->crtc_x + plane_state->crtc_w >= crtc_state->mode.hdisplay &&
		    plane_state->crtc_y + plane_state->crtc_h >= crtc_state->mode.vdisplay) {
			entire_crtc_covered = true;
			break;
		}
	}

	/* If planes do not cover the entire CRTC, use overlay mode to enable
	 * cursor over holes
	 */
	if (entire_crtc_covered)
		*cursor_mode = DM_CURSOR_NATIVE_MODE;
	else
		*cursor_mode = DM_CURSOR_OVERLAY_MODE;

	return 0;
}

static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
					    struct drm_atomic_state *state,
					    struct drm_crtc_state *crtc_state)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state, *old_plane_state;

	drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
		new_plane_state = drm_atomic_get_plane_state(state, plane);
		old_plane_state = drm_atomic_get_old_plane_state(state, plane);

		if (IS_ERR(new_plane_state) || !old_plane_state) {
			drm_err(dev, "Failed to get plane state for plane %s\n", plane->name);
			return false;
		}

		if (old_plane_state->fb && new_plane_state->fb &&
		    get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
			return true;
	}

	return false;
}

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 *
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state.
Otherwise, atomic_check 12400 * may unexpectedly commit hardware changes. 12401 * 12402 * When validating the DC state, it's important that the right locks are 12403 * acquired. For full updates case which removes/adds/updates streams on one 12404 * CRTC while flipping on another CRTC, acquiring global lock will guarantee 12405 * that any such full update commit will wait for completion of any outstanding 12406 * flip using DRMs synchronization events. 12407 * 12408 * Note that DM adds the affected connectors for all CRTCs in state, when that 12409 * might not seem necessary. This is because DC stream creation requires the 12410 * DC sink, which is tied to the DRM connector state. Cleaning this up should 12411 * be possible but non-trivial - a possible TODO item. 12412 * 12413 * Return: -Error code if validation failed. 12414 */ 12415 static int amdgpu_dm_atomic_check(struct drm_device *dev, 12416 struct drm_atomic_state *state) 12417 { 12418 struct amdgpu_device *adev = drm_to_adev(dev); 12419 struct dm_atomic_state *dm_state = NULL; 12420 struct dc *dc = adev->dm.dc; 12421 struct drm_connector *connector; 12422 struct drm_connector_state *old_con_state, *new_con_state; 12423 struct drm_crtc *crtc; 12424 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 12425 struct drm_plane *plane; 12426 struct drm_plane_state *old_plane_state, *new_plane_state, *new_cursor_state; 12427 enum dc_status status; 12428 int ret, i; 12429 bool lock_and_validation_needed = false; 12430 bool is_top_most_overlay = true; 12431 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 12432 struct drm_dp_mst_topology_mgr *mgr; 12433 struct drm_dp_mst_topology_state *mst_state; 12434 struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0}; 12435 12436 trace_amdgpu_dm_atomic_check_begin(state); 12437 12438 ret = drm_atomic_helper_check_modeset(dev, state); 12439 if (ret) { 12440 drm_dbg_atomic(dev, "drm_atomic_helper_check_modeset() failed\n"); 12441 goto fail; 12442 } 12443 12444 /* Check connector changes */ 12445 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 12446 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 12447 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 12448 12449 /* Skip connectors that are disabled or part of modeset already. 
*/ 12450 if (!new_con_state->crtc) 12451 continue; 12452 12453 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); 12454 if (IS_ERR(new_crtc_state)) { 12455 drm_dbg_atomic(dev, "drm_atomic_get_crtc_state() failed\n"); 12456 ret = PTR_ERR(new_crtc_state); 12457 goto fail; 12458 } 12459 12460 if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || 12461 dm_old_con_state->scaling != dm_new_con_state->scaling) 12462 new_crtc_state->connectors_changed = true; 12463 } 12464 12465 if (dc_resource_is_dsc_encoding_supported(dc)) { 12466 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12467 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { 12468 ret = add_affected_mst_dsc_crtcs(state, crtc); 12469 if (ret) { 12470 drm_dbg_atomic(dev, "add_affected_mst_dsc_crtcs() failed\n"); 12471 goto fail; 12472 } 12473 } 12474 } 12475 } 12476 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12477 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 12478 12479 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 12480 !new_crtc_state->color_mgmt_changed && 12481 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled && 12482 dm_old_crtc_state->dsc_force_changed == false) 12483 continue; 12484 12485 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); 12486 if (ret) { 12487 drm_dbg_atomic(dev, "amdgpu_dm_verify_lut_sizes() failed\n"); 12488 goto fail; 12489 } 12490 12491 if (!new_crtc_state->enable) 12492 continue; 12493 12494 ret = drm_atomic_add_affected_connectors(state, crtc); 12495 if (ret) { 12496 drm_dbg_atomic(dev, "drm_atomic_add_affected_connectors() failed\n"); 12497 goto fail; 12498 } 12499 12500 ret = drm_atomic_add_affected_planes(state, crtc); 12501 if (ret) { 12502 drm_dbg_atomic(dev, "drm_atomic_add_affected_planes() failed\n"); 12503 goto fail; 12504 } 12505 12506 if (dm_old_crtc_state->dsc_force_changed) 12507 new_crtc_state->mode_changed = true; 12508 } 12509 12510 /* 12511 * Add all primary and overlay planes on the CRTC to the state 12512 * whenever a plane is enabled to maintain correct z-ordering 12513 * and to enable fast surface updates. 12514 */ 12515 drm_for_each_crtc(crtc, dev) { 12516 bool modified = false; 12517 12518 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 12519 if (plane->type == DRM_PLANE_TYPE_CURSOR) 12520 continue; 12521 12522 if (new_plane_state->crtc == crtc || 12523 old_plane_state->crtc == crtc) { 12524 modified = true; 12525 break; 12526 } 12527 } 12528 12529 if (!modified) 12530 continue; 12531 12532 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 12533 if (plane->type == DRM_PLANE_TYPE_CURSOR) 12534 continue; 12535 12536 new_plane_state = 12537 drm_atomic_get_plane_state(state, plane); 12538 12539 if (IS_ERR(new_plane_state)) { 12540 ret = PTR_ERR(new_plane_state); 12541 drm_dbg_atomic(dev, "new_plane_state is BAD\n"); 12542 goto fail; 12543 } 12544 } 12545 } 12546 12547 /* 12548 * DC consults the zpos (layer_index in DC terminology) to determine the 12549 * hw plane on which to enable the hw cursor (see 12550 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in 12551 * atomic state, so call drm helper to normalize zpos. 12552 */ 12553 ret = drm_atomic_normalize_zpos(dev, state); 12554 if (ret) { 12555 drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n"); 12556 goto fail; 12557 } 12558 12559 /* 12560 * Determine whether cursors on each CRTC should be enabled in native or 12561 * overlay mode. 
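	 * This relies on the zpos normalization done just above; see the
	 * kernel-doc of dm_crtc_get_cursor_mode().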
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
					      &dm_new_crtc_state->cursor_mode);
		if (ret) {
			drm_dbg(dev, "Failed to determine cursor mode\n");
			goto fail;
		}

		/*
		 * If overlay cursor is needed, DC cannot go through the
		 * native cursor update path. All enabled planes on the CRTC
		 * need to be added for DC to not disable a plane by mistake.
		 */
		if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) {
			if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0) {
				drm_dbg(dev, "Overlay cursor not supported on DCE\n");
				ret = -EINVAL;
				goto fail;
			}

			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				goto fail;
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed,
					    &is_top_most_overlay);
		if (ret) {
			drm_dbg_atomic(dev, "dm_update_plane_state() failed\n");
			goto fail;
		}
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret) {
			drm_dbg_atomic(dev, "DISABLE: dm_update_crtc_state() failed\n");
			goto fail;
		}
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret) {
			drm_dbg_atomic(dev, "ENABLE: dm_update_crtc_state() failed\n");
			goto fail;
		}
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed,
					    &is_top_most_overlay);
		if (ret) {
			drm_dbg_atomic(dev, "dm_update_plane_state() failed\n");
			goto fail;
		}
	}

#if defined(CONFIG_DRM_AMD_DC_FP)
	if (dc_resource_is_dsc_encoding_supported(dc)) {
		ret = pre_validate_dsc(state, &dm_state, vars);
		if (ret != 0)
			goto fail;
	}
#endif

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret) {
		drm_dbg_atomic(dev, "drm_atomic_helper_check_planes() failed\n");
		goto fail;
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->mpo_requested)
			drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc);
	}

	/* Check cursor restrictions */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum amdgpu_dm_cursor_mode required_cursor_mode;
		int is_rotated, is_scaled;

		/* Overlay cursor is not subject to native cursor restrictions */
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
			continue;

		/* Check if rotation or scaling is enabled on DCN401/DCN420 */
		if ((drm_plane_mask(crtc->cursor) & new_crtc_state->plane_mask) &&
		    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 2, 0) ||
		     amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1))) {
			new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);

			is_rotated = new_cursor_state &&
				((new_cursor_state->rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0);
			is_scaled = new_cursor_state && ((new_cursor_state->src_w >> 16 != new_cursor_state->crtc_w) ||
							 (new_cursor_state->src_h >> 16 != new_cursor_state->crtc_h));

			if (is_rotated || is_scaled) {
				drm_dbg_driver(
					crtc->dev,
					"[CRTC:%d:%s] cannot enable hardware cursor due to rotation/scaling\n",
					crtc->base.id, crtc->name);
				ret = -EINVAL;
				goto fail;
			}
		}

		/* If HW can only do native cursor, check restrictions again */
		ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
					      &required_cursor_mode);
		if (ret) {
			drm_dbg_driver(crtc->dev,
				       "[CRTC:%d:%s] Checking cursor mode failed\n",
				       crtc->base.id, crtc->name);
			goto fail;
		} else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) {
			drm_dbg_driver(crtc->dev,
				       "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n",
				       crtc->base.id, crtc->name);
			ret = -EINVAL;
			goto fail;
		}
	}

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to
	 * decide how to handle.
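	 * For now, a scale or underscan change simply forces the full
	 * lock-and-validation path via lock_and_validation_needed below.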
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scale or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/* Set the slot info for each mst_state based on the link encoding format */
	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		struct amdgpu_dm_connector *aconnector;
		struct drm_connector *connector;
		struct drm_connector_list_iter iter;
		u8 link_coding_cap;

		drm_connector_list_iter_begin(dev, &iter);
		drm_for_each_connector_iter(connector, &iter) {
			if (connector->index == mst_state->mgr->conn_base_id) {
				aconnector = to_amdgpu_dm_connector(connector);
				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
				drm_dp_mst_update_slots(mst_state, link_coding_cap);

				break;
			}
		}
		drm_connector_list_iter_end(&iter);
	}

	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			drm_dbg_atomic(dev, "dm_atomic_get_state() failed\n");
			goto fail;
		}

		ret = do_aquire_global_lock(dev, state);
		if (ret) {
			drm_dbg_atomic(dev, "do_aquire_global_lock() failed\n");
			goto fail;
		}

#if defined(CONFIG_DRM_AMD_DC_FP)
		if (dc_resource_is_dsc_encoding_supported(dc)) {
			ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
			if (ret) {
				drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n");
				ret = -EINVAL;
				goto fail;
			}
		}
#endif

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
		if (ret) {
			drm_dbg_atomic(dev, "dm_update_mst_vcpi_slots_for_dsc() failed\n");
			goto fail;
		}

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
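		 * Only once the MST check has passed is the new DC context
		 * handed to dc_validate_global_state() below, which runs in
		 * DC_VALIDATE_MODE_ONLY mode and programs no hw.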
12825 */ 12826 ret = drm_dp_mst_atomic_check(state); 12827 if (ret) { 12828 drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n"); 12829 goto fail; 12830 } 12831 status = dc_validate_global_state(dc, dm_state->context, DC_VALIDATE_MODE_ONLY); 12832 if (status != DC_OK) { 12833 drm_dbg_atomic(dev, "DC global validation failure: %s (%d)", 12834 dc_status_to_str(status), status); 12835 ret = -EINVAL; 12836 goto fail; 12837 } 12838 } else { 12839 /* 12840 * The commit is a fast update. Fast updates shouldn't change 12841 * the DC context, affect global validation, and can have their 12842 * commit work done in parallel with other commits not touching 12843 * the same resource. If we have a new DC context as part of 12844 * the DM atomic state from validation we need to free it and 12845 * retain the existing one instead. 12846 * 12847 * Furthermore, since the DM atomic state only contains the DC 12848 * context and can safely be annulled, we can free the state 12849 * and clear the associated private object now to free 12850 * some memory and avoid a possible use-after-free later. 12851 */ 12852 12853 for (i = 0; i < state->num_private_objs; i++) { 12854 struct drm_private_obj *obj = state->private_objs[i].ptr; 12855 12856 if (obj->funcs == adev->dm.atomic_obj.funcs) { 12857 int j = state->num_private_objs-1; 12858 12859 dm_atomic_destroy_state(obj, 12860 state->private_objs[i].state_to_destroy); 12861 12862 /* If i is not at the end of the array then the 12863 * last element needs to be moved to where i was 12864 * before the array can safely be truncated. 12865 */ 12866 if (i != j) 12867 state->private_objs[i] = 12868 state->private_objs[j]; 12869 12870 state->private_objs[j].ptr = NULL; 12871 state->private_objs[j].state_to_destroy = NULL; 12872 state->private_objs[j].old_state = NULL; 12873 state->private_objs[j].new_state = NULL; 12874 12875 state->num_private_objs = j; 12876 break; 12877 } 12878 } 12879 } 12880 12881 /* Store the overall update type for use later in atomic check. */ 12882 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 12883 struct dm_crtc_state *dm_new_crtc_state = 12884 to_dm_crtc_state(new_crtc_state); 12885 12886 /* 12887 * Only allow async flips for fast updates that don't change 12888 * the FB pitch, the DCC state, rotation, mem_type, etc. 12889 */ 12890 if (new_crtc_state->async_flip && 12891 (lock_and_validation_needed || 12892 amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) { 12893 drm_dbg_atomic(crtc->dev, 12894 "[CRTC:%d:%s] async flips are only supported for fast updates\n", 12895 crtc->base.id, crtc->name); 12896 ret = -EINVAL; 12897 goto fail; 12898 } 12899 12900 dm_new_crtc_state->update_type = lock_and_validation_needed ? 
12901 UPDATE_TYPE_FULL : UPDATE_TYPE_FAST; 12902 } 12903 12904 /* Must be success */ 12905 WARN_ON(ret); 12906 12907 trace_amdgpu_dm_atomic_check_finish(state, ret); 12908 12909 return ret; 12910 12911 fail: 12912 if (ret == -EDEADLK) 12913 drm_dbg_atomic(dev, "Atomic check stopped to avoid deadlock.\n"); 12914 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) 12915 drm_dbg_atomic(dev, "Atomic check stopped due to signal.\n"); 12916 else 12917 drm_dbg_atomic(dev, "Atomic check failed with err: %d\n", ret); 12918 12919 trace_amdgpu_dm_atomic_check_finish(state, ret); 12920 12921 return ret; 12922 } 12923 12924 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, 12925 unsigned int offset, 12926 unsigned int total_length, 12927 u8 *data, 12928 unsigned int length, 12929 struct amdgpu_hdmi_vsdb_info *vsdb) 12930 { 12931 bool res; 12932 union dmub_rb_cmd cmd; 12933 struct dmub_cmd_send_edid_cea *input; 12934 struct dmub_cmd_edid_cea_output *output; 12935 12936 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES) 12937 return false; 12938 12939 memset(&cmd, 0, sizeof(cmd)); 12940 12941 input = &cmd.edid_cea.data.input; 12942 12943 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA; 12944 cmd.edid_cea.header.sub_type = 0; 12945 cmd.edid_cea.header.payload_bytes = 12946 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header); 12947 input->offset = offset; 12948 input->length = length; 12949 input->cea_total_length = total_length; 12950 memcpy(input->payload, data, length); 12951 12952 res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); 12953 if (!res) { 12954 drm_err(adev_to_drm(dm->adev), "EDID CEA parser failed\n"); 12955 return false; 12956 } 12957 12958 output = &cmd.edid_cea.data.output; 12959 12960 if (output->type == DMUB_CMD__EDID_CEA_ACK) { 12961 if (!output->ack.success) { 12962 drm_err(adev_to_drm(dm->adev), "EDID CEA ack failed at offset %d\n", 12963 output->ack.offset); 12964 } 12965 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) { 12966 if (!output->amd_vsdb.vsdb_found) 12967 return false; 12968 12969 vsdb->freesync_supported = output->amd_vsdb.freesync_supported; 12970 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version; 12971 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate; 12972 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate; 12973 } else { 12974 drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n"); 12975 return false; 12976 } 12977 12978 return true; 12979 } 12980 12981 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, 12982 u8 *edid_ext, int len, 12983 struct amdgpu_hdmi_vsdb_info *vsdb_info) 12984 { 12985 int i; 12986 12987 /* send extension block to DMCU for parsing */ 12988 for (i = 0; i < len; i += 8) { 12989 bool res; 12990 int offset; 12991 12992 /* send 8 bytes a time */ 12993 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8)) 12994 return false; 12995 12996 if (i+8 == len) { 12997 /* EDID block sent completed, expect result */ 12998 int version, min_rate, max_rate; 12999 13000 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate); 13001 if (res) { 13002 /* amd vsdb found */ 13003 vsdb_info->freesync_supported = 1; 13004 vsdb_info->amd_vsdb_version = version; 13005 vsdb_info->min_refresh_rate_hz = min_rate; 13006 vsdb_info->max_refresh_rate_hz = max_rate; 13007 return true; 13008 } 13009 /* not amd vsdb */ 13010 return false; 13011 } 13012 13013 /* check for ack*/ 13014 res = dc_edid_parser_recv_cea_ack(dm->dc, 
&offset); 13015 if (!res) 13016 return false; 13017 } 13018 13019 return false; 13020 } 13021 13022 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, 13023 u8 *edid_ext, int len, 13024 struct amdgpu_hdmi_vsdb_info *vsdb_info) 13025 { 13026 int i; 13027 13028 /* send extension block to DMCU for parsing */ 13029 for (i = 0; i < len; i += 8) { 13030 /* send 8 bytes a time */ 13031 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info)) 13032 return false; 13033 } 13034 13035 return vsdb_info->freesync_supported; 13036 } 13037 13038 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, 13039 u8 *edid_ext, int len, 13040 struct amdgpu_hdmi_vsdb_info *vsdb_info) 13041 { 13042 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); 13043 bool ret; 13044 13045 mutex_lock(&adev->dm.dc_lock); 13046 if (adev->dm.dmub_srv) 13047 ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); 13048 else 13049 ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); 13050 mutex_unlock(&adev->dm.dc_lock); 13051 return ret; 13052 } 13053 13054 static void parse_edid_displayid_vrr(struct drm_connector *connector, 13055 const struct edid *edid) 13056 { 13057 u8 *edid_ext = NULL; 13058 int i; 13059 int j = 0; 13060 u16 min_vfreq; 13061 u16 max_vfreq; 13062 13063 if (edid == NULL || edid->extensions == 0) 13064 return; 13065 13066 /* Find DisplayID extension */ 13067 for (i = 0; i < edid->extensions; i++) { 13068 edid_ext = (void *)(edid + (i + 1)); 13069 if (edid_ext[0] == DISPLAYID_EXT) 13070 break; 13071 } 13072 13073 if (edid_ext == NULL) 13074 return; 13075 13076 while (j < EDID_LENGTH) { 13077 /* Get dynamic video timing range from DisplayID if available */ 13078 if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 && 13079 (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) { 13080 min_vfreq = edid_ext[j+9]; 13081 if (edid_ext[j+1] & 7) 13082 max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8); 13083 else 13084 max_vfreq = edid_ext[j+10]; 13085 13086 if (max_vfreq && min_vfreq) { 13087 connector->display_info.monitor_range.max_vfreq = max_vfreq; 13088 connector->display_info.monitor_range.min_vfreq = min_vfreq; 13089 13090 return; 13091 } 13092 } 13093 j++; 13094 } 13095 } 13096 13097 static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, 13098 const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) 13099 { 13100 u8 *edid_ext = NULL; 13101 int i; 13102 int j = 0; 13103 int total_ext_block_len; 13104 13105 if (edid == NULL || edid->extensions == 0) 13106 return -ENODEV; 13107 13108 /* Find DisplayID extension */ 13109 for (i = 0; i < edid->extensions; i++) { 13110 edid_ext = (void *)(edid + (i + 1)); 13111 if (edid_ext[0] == DISPLAYID_EXT) 13112 break; 13113 } 13114 13115 total_ext_block_len = EDID_LENGTH * edid->extensions; 13116 while (j < total_ext_block_len - sizeof(struct amd_vsdb_block)) { 13117 struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j]; 13118 unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]); 13119 13120 if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID && 13121 amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) { 13122 u8 panel_type; 13123 vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? 
true : false; 13124 vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3; 13125 drm_dbg_kms(aconnector->base.dev, "Panel supports Replay Mode: %d\n", vsdb_info->replay_mode); 13126 panel_type = (amd_vsdb->color_space_eotf_support & AMD_VDSB_VERSION_3_PANEL_TYPE_MASK) >> AMD_VDSB_VERSION_3_PANEL_TYPE_SHIFT; 13127 switch (panel_type) { 13128 case AMD_VSDB_PANEL_TYPE_OLED: 13129 aconnector->dc_link->panel_type = PANEL_TYPE_OLED; 13130 break; 13131 case AMD_VSDB_PANEL_TYPE_MINILED: 13132 aconnector->dc_link->panel_type = PANEL_TYPE_MINILED; 13133 break; 13134 default: 13135 aconnector->dc_link->panel_type = PANEL_TYPE_NONE; 13136 break; 13137 } 13138 drm_dbg_kms(aconnector->base.dev, "Panel type: %d\n", 13139 aconnector->dc_link->panel_type); 13140 13141 return true; 13142 } 13143 j++; 13144 } 13145 13146 return false; 13147 } 13148 13149 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, 13150 const struct edid *edid, 13151 struct amdgpu_hdmi_vsdb_info *vsdb_info) 13152 { 13153 u8 *edid_ext = NULL; 13154 int i; 13155 bool valid_vsdb_found = false; 13156 13157 /*----- drm_find_cea_extension() -----*/ 13158 /* No EDID or EDID extensions */ 13159 if (edid == NULL || edid->extensions == 0) 13160 return -ENODEV; 13161 13162 /* Find CEA extension */ 13163 for (i = 0; i < edid->extensions; i++) { 13164 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1); 13165 if (edid_ext[0] == CEA_EXT) 13166 break; 13167 } 13168 13169 if (i == edid->extensions) 13170 return -ENODEV; 13171 13172 /*----- cea_db_offsets() -----*/ 13173 if (edid_ext[0] != CEA_EXT) 13174 return -ENODEV; 13175 13176 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info); 13177 13178 return valid_vsdb_found ? i : -ENODEV; 13179 } 13180 13181 /** 13182 * amdgpu_dm_update_freesync_caps - Update Freesync capabilities 13183 * 13184 * @connector: Connector to query. 13185 * @drm_edid: DRM EDID from monitor 13186 * 13187 * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep 13188 * track of some of the display information in the internal data struct used by 13189 * amdgpu_dm. This function checks which type of connector we need to set the 13190 * FreeSync parameters. 13191 */ 13192 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, 13193 const struct drm_edid *drm_edid) 13194 { 13195 int i = 0; 13196 struct amdgpu_dm_connector *amdgpu_dm_connector = 13197 to_amdgpu_dm_connector(connector); 13198 struct dm_connector_state *dm_con_state = NULL; 13199 struct dc_sink *sink; 13200 struct amdgpu_device *adev = drm_to_adev(connector->dev); 13201 struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; 13202 const struct edid *edid; 13203 bool freesync_capable = false; 13204 enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; 13205 13206 if (!connector->state) { 13207 drm_err(adev_to_drm(adev), "%s - Connector has no state", __func__); 13208 goto update; 13209 } 13210 13211 sink = amdgpu_dm_connector->dc_sink ? 
13212 amdgpu_dm_connector->dc_sink : 13213 amdgpu_dm_connector->dc_em_sink; 13214 13215 drm_edid_connector_update(connector, drm_edid); 13216 13217 if (!drm_edid || !sink) { 13218 dm_con_state = to_dm_connector_state(connector->state); 13219 13220 amdgpu_dm_connector->min_vfreq = 0; 13221 amdgpu_dm_connector->max_vfreq = 0; 13222 freesync_capable = false; 13223 13224 goto update; 13225 } 13226 13227 dm_con_state = to_dm_connector_state(connector->state); 13228 13229 if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version)) 13230 goto update; 13231 13232 edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() 13233 13234 /* Some eDP panels only have the refresh rate range info in DisplayID */ 13235 if ((connector->display_info.monitor_range.min_vfreq == 0 || 13236 connector->display_info.monitor_range.max_vfreq == 0)) 13237 parse_edid_displayid_vrr(connector, edid); 13238 13239 if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || 13240 sink->sink_signal == SIGNAL_TYPE_EDP)) { 13241 if (amdgpu_dm_connector->dc_link && 13242 amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) { 13243 amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; 13244 amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; 13245 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 13246 freesync_capable = true; 13247 } 13248 13249 parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 13250 13251 if (vsdb_info.replay_mode) { 13252 amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode; 13253 amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version; 13254 amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; 13255 } 13256 13257 } else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { 13258 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 13259 if (i >= 0 && vsdb_info.freesync_supported) { 13260 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; 13261 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; 13262 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 13263 freesync_capable = true; 13264 13265 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; 13266 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; 13267 } 13268 } 13269 13270 if (amdgpu_dm_connector->dc_link) 13271 as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); 13272 13273 if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { 13274 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 13275 if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) { 13276 13277 amdgpu_dm_connector->pack_sdp_v1_3 = true; 13278 amdgpu_dm_connector->as_type = as_type; 13279 amdgpu_dm_connector->vsdb_info = vsdb_info; 13280 13281 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; 13282 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; 13283 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 13284 freesync_capable = true; 13285 13286 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; 13287 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; 13288 } 13289 } 13290 13291 update: 13292 if (dm_con_state) 13293 dm_con_state->freesync_capable = freesync_capable; 13294 13295 if (connector->state && amdgpu_dm_connector->dc_link && 
!freesync_capable && 13296 amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported) { 13297 amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported = false; 13298 amdgpu_dm_connector->dc_link->replay_settings.replay_feature_enabled = false; 13299 } 13300 13301 if (connector->vrr_capable_property) 13302 drm_connector_set_vrr_capable_property(connector, 13303 freesync_capable); 13304 } 13305 13306 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev) 13307 { 13308 struct amdgpu_device *adev = drm_to_adev(dev); 13309 struct dc *dc = adev->dm.dc; 13310 int i; 13311 13312 mutex_lock(&adev->dm.dc_lock); 13313 if (dc->current_state) { 13314 for (i = 0; i < dc->current_state->stream_count; ++i) 13315 dc->current_state->streams[i] 13316 ->triggered_crtc_reset.enabled = 13317 adev->dm.force_timing_sync; 13318 13319 dm_enable_per_frame_crtc_master_sync(dc->current_state); 13320 dc_trigger_sync(dc, dc->current_state); 13321 } 13322 mutex_unlock(&adev->dm.dc_lock); 13323 } 13324 13325 static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc) 13326 { 13327 if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter) 13328 dc_exit_ips_for_hw_access(dc); 13329 } 13330 13331 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, 13332 u32 value, const char *func_name) 13333 { 13334 #ifdef DM_CHECK_ADDR_0 13335 if (address == 0) { 13336 drm_err(adev_to_drm(ctx->driver_context), 13337 "invalid register write. address = 0"); 13338 return; 13339 } 13340 #endif 13341 13342 amdgpu_dm_exit_ips_for_hw_access(ctx->dc); 13343 cgs_write_register(ctx->cgs_device, address, value); 13344 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value); 13345 } 13346 13347 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, 13348 const char *func_name) 13349 { 13350 u32 value; 13351 #ifdef DM_CHECK_ADDR_0 13352 if (address == 0) { 13353 drm_err(adev_to_drm(ctx->driver_context), 13354 "invalid register read; address = 0\n"); 13355 return 0; 13356 } 13357 #endif 13358 13359 if (ctx->dmub_srv && 13360 ctx->dmub_srv->reg_helper_offload.gather_in_progress && 13361 !ctx->dmub_srv->reg_helper_offload.should_burst_write) { 13362 ASSERT(false); 13363 return 0; 13364 } 13365 13366 amdgpu_dm_exit_ips_for_hw_access(ctx->dc); 13367 13368 value = cgs_read_register(ctx->cgs_device, address); 13369 13370 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); 13371 13372 return value; 13373 } 13374 13375 int amdgpu_dm_process_dmub_aux_transfer_sync( 13376 struct dc_context *ctx, 13377 unsigned int link_index, 13378 struct aux_payload *payload, 13379 enum aux_return_code_type *operation_result) 13380 { 13381 struct amdgpu_device *adev = ctx->driver_context; 13382 struct dmub_notification *p_notify = adev->dm.dmub_notify; 13383 int ret = -1; 13384 13385 mutex_lock(&adev->dm.dpia_aux_lock); 13386 if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) { 13387 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; 13388 goto out; 13389 } 13390 13391 if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { 13392 drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!"); 13393 *operation_result = AUX_RET_ERROR_TIMEOUT; 13394 goto out; 13395 } 13396 13397 if (p_notify->result != AUX_RET_SUCCESS) { 13398 /* 13399 * Transient states before tunneling is enabled could 13400 * lead to this error. We can ignore this for now. 
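		 * The raw result code is still propagated to the caller
		 * through *operation_result below.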
13401 */ 13402 if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) { 13403 drm_warn(adev_to_drm(adev), "DPIA AUX failed on 0x%x(%d), error %d\n", 13404 payload->address, payload->length, 13405 p_notify->result); 13406 } 13407 *operation_result = p_notify->result; 13408 goto out; 13409 } 13410 13411 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF; 13412 if (adev->dm.dmub_notify->aux_reply.command & 0xF0) 13413 /* The reply is stored in the top nibble of the command. */ 13414 payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF; 13415 13416 /*write req may receive a byte indicating partially written number as well*/ 13417 if (p_notify->aux_reply.length) 13418 memcpy(payload->data, p_notify->aux_reply.data, 13419 p_notify->aux_reply.length); 13420 13421 /* success */ 13422 ret = p_notify->aux_reply.length; 13423 *operation_result = p_notify->result; 13424 out: 13425 reinit_completion(&adev->dm.dmub_aux_transfer_done); 13426 mutex_unlock(&adev->dm.dpia_aux_lock); 13427 return ret; 13428 } 13429 13430 static void abort_fused_io( 13431 struct dc_context *ctx, 13432 const struct dmub_cmd_fused_request *request 13433 ) 13434 { 13435 union dmub_rb_cmd command = { 0 }; 13436 struct dmub_rb_cmd_fused_io *io = &command.fused_io; 13437 13438 io->header.type = DMUB_CMD__FUSED_IO; 13439 io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT; 13440 io->header.payload_bytes = sizeof(*io) - sizeof(io->header); 13441 io->request = *request; 13442 dm_execute_dmub_cmd(ctx, &command, DM_DMUB_WAIT_TYPE_NO_WAIT); 13443 } 13444 13445 static bool execute_fused_io( 13446 struct amdgpu_device *dev, 13447 struct dc_context *ctx, 13448 union dmub_rb_cmd *commands, 13449 uint8_t count, 13450 uint32_t timeout_us 13451 ) 13452 { 13453 const uint8_t ddc_line = commands[0].fused_io.request.u.aux.ddc_line; 13454 13455 if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io)) 13456 return false; 13457 13458 struct fused_io_sync *sync = &dev->dm.fused_io[ddc_line]; 13459 struct dmub_rb_cmd_fused_io *first = &commands[0].fused_io; 13460 const bool result = dm_execute_dmub_cmd_list(ctx, count, commands, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) 13461 && first->header.ret_status 13462 && first->request.status == FUSED_REQUEST_STATUS_SUCCESS; 13463 13464 if (!result) 13465 return false; 13466 13467 while (wait_for_completion_timeout(&sync->replied, usecs_to_jiffies(timeout_us))) { 13468 reinit_completion(&sync->replied); 13469 13470 struct dmub_cmd_fused_request *reply = (struct dmub_cmd_fused_request *) sync->reply_data; 13471 13472 static_assert(sizeof(*reply) <= sizeof(sync->reply_data), "Size mismatch"); 13473 13474 if (reply->identifier == first->request.identifier) { 13475 first->request = *reply; 13476 return true; 13477 } 13478 } 13479 13480 reinit_completion(&sync->replied); 13481 first->request.status = FUSED_REQUEST_STATUS_TIMEOUT; 13482 abort_fused_io(ctx, &first->request); 13483 return false; 13484 } 13485 13486 bool amdgpu_dm_execute_fused_io( 13487 struct amdgpu_device *dev, 13488 struct dc_link *link, 13489 union dmub_rb_cmd *commands, 13490 uint8_t count, 13491 uint32_t timeout_us) 13492 { 13493 struct amdgpu_display_manager *dm = &dev->dm; 13494 13495 mutex_lock(&dm->dpia_aux_lock); 13496 13497 const bool result = execute_fused_io(dev, link->ctx, commands, count, timeout_us); 13498 13499 mutex_unlock(&dm->dpia_aux_lock); 13500 return result; 13501 } 13502 13503 int amdgpu_dm_process_dmub_set_config_sync( 13504 struct dc_context *ctx, 13505 unsigned int link_index, 13506 struct set_config_cmd_payload 
*payload, 13507 enum set_config_status *operation_result) 13508 { 13509 struct amdgpu_device *adev = ctx->driver_context; 13510 bool is_cmd_complete; 13511 int ret; 13512 13513 mutex_lock(&adev->dm.dpia_aux_lock); 13514 is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc, 13515 link_index, payload, adev->dm.dmub_notify); 13516 13517 if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { 13518 ret = 0; 13519 *operation_result = adev->dm.dmub_notify->sc_status; 13520 } else { 13521 drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!"); 13522 ret = -1; 13523 *operation_result = SET_CONFIG_UNKNOWN_ERROR; 13524 } 13525 13526 if (!is_cmd_complete) 13527 reinit_completion(&adev->dm.dmub_aux_transfer_done); 13528 mutex_unlock(&adev->dm.dpia_aux_lock); 13529 return ret; 13530 } 13531 13532 bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) 13533 { 13534 return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type); 13535 } 13536 13537 bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) 13538 { 13539 return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type); 13540 } 13541 13542 void dm_acpi_process_phy_transition_interlock( 13543 const struct dc_context *ctx, 13544 struct dm_process_phy_transition_init_params process_phy_transition_init_params) 13545 { 13546 // Not yet implemented 13547 } 13548