/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_replay.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>
#include <linux/sort.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>

#include <acpi/video.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);

#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);

#define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    u32 link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc = NULL;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
	struct amdgpu_crtc *acrtc = NULL;
	struct dc *dc = adev->dm.dc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
		dc_allow_idle_optimizations(dc, false);

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
	return false;
}

static int dm_soft_reset(struct amdgpu_ip_block *ip_block)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
		return true;
	else
		return false;
}

/*
 * DC will program planes with their z-order determined by their ordering
 * in the dc_surface_updates array. This comparator is used to sort them
 * by descending zpos.
 */
static int dm_plane_layer_index_cmp(const void *a, const void *b)
{
	const struct dc_surface_update *sa = (struct dc_surface_update *)a;
	const struct dc_surface_update *sb = (struct dc_surface_update *)b;

	/* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
	return sb->surface->layer_index - sa->surface->layer_index;
}

/**
 * update_planes_and_stream_adapter() - Send planes to be updated in DC
 *
 * DC has a generic way to update planes and stream via
 * dc_update_planes_and_stream function; however, DM might need some
 * adjustments and preparation before calling it. This function is a wrapper
 * for the dc_update_planes_and_stream that does any required configuration
 * before passing control to DC.
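 * Currently that means sorting the surface updates by descending
 * layer_index so DC programs planes in the right z-order, and letting DC
 * apply deferred surface optimizations when a fast update indicates the
 * previous frame has finished.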
 *
 * @dc: Display Core control structure
 * @update_type: specify whether it is FULL/MEDIUM/FAST update
 * @planes_count: planes count to update
 * @stream: stream state
 * @stream_update: stream update
 * @array_of_surface_update: dc surface update pointer
 *
 */
static inline bool update_planes_and_stream_adapter(struct dc *dc,
						    int update_type,
						    int planes_count,
						    struct dc_stream_state *stream,
						    struct dc_stream_update *stream_update,
						    struct dc_surface_update *array_of_surface_update)
{
	sort(array_of_surface_update, planes_count,
	     sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);

	/*
	 * Previous frame finished and HW is ready for optimization.
	 */
	if (update_type == UPDATE_TYPE_FAST)
		dc_post_update_surfaces_to_stream(dc);

	return dc_update_planes_and_stream(dc,
					   array_of_surface_update,
					   planes_count,
					   stream,
					   stream_update);
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct drm_device *dev = adev_to_drm(adev);
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	u32 vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		drm_dbg_state(dev, "CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		drm_dbg_state(dev,
			      "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			      amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
			      amdgpu_crtc->crtc_id, amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_dbg_state(dev,
		      "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		      amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = drm_crtc_vblank_crtc(&acrtc->base);
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		drm_dbg_vbl(drm_dev,
			    "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
			    vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			amdgpu_dm_crtc_handle_vblank(acrtc);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
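 * It also signals completion of any pending writeback job and, at start
 * of vblank, handles CRC capture and FreeSync vmin/vmax (BTR) updates.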
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct drm_writeback_job *job;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	if (acrtc->wb_conn) {
		spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);

		if (acrtc->wb_pending) {
			job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
						       struct drm_writeback_job,
						       list_entry);
			acrtc->wb_pending = false;
			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);

			if (job) {
				unsigned int v_total, refresh_hz;
				struct dc_stream_state *stream = acrtc->dm_irq_params.stream;

				v_total = stream->adjust.v_total_max ?
					  stream->adjust.v_total_max : stream->timing.v_total;
				refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
					     100LL, (v_total * stream->timing.h_total));
				mdelay(1000 / refresh_hz);

				drm_writeback_signal_completion(acrtc->wb_conn, 0);
				dc_stream_fc_disable_writeback(adev->dm.dc,
							       acrtc->dm_irq_params.stream, 0);
			}
		} else
			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
	}

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);

	drm_dbg_vbl(adev_to_drm(adev),
		    "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		    vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		amdgpu_dm_crtc_handle_vblank(acrtc);

	/*
	 * The following must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, to be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index
 * through the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	u8 link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later.
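	 * HPD state is re-detected as part of the resume sequence, so no
	 * events should be lost by ignoring the interrupt here.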
	 */
	if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
		DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			if (notify->type == DMUB_NOTIFICATION_HPD)
				DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
			else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
				DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
			else
				DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
					 notify->type, link_index);

			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD) {
			if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
				DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index);
			handle_hpd_irq_helper(hpd_aconnector);
		} else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
			handle_hpd_rx_irq(hpd_aconnector);
		}
	}
}

/**
 * dmub_hpd_sense_callback - DMUB HPD sense processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * HPD sense changes can occur during low power states and need to be
 * notified from firmware to driver.
 */
static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
				    struct dmub_notification *notify)
{
	DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n");
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator whether callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
		dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);

}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox Interrupt
 * event handler.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify = {0};
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	u32 count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	static const char *const event_type[] = {
		"NO_DATA",
		"AUX_REPLY",
		"HPD",
		"HPD_IRQ",
		"SET_CONFIG_REPLY",
		"DPIA_NOTIFICATION",
		"HPD_SENSE_NOTIFY",
	};

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_WARN("DMUB notification skipped due to no handler: type=%s\n",
					 event_type[notify.type]);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
								    GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				dmub_hpd_wrk->adev = adev;
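				/* This runs in IRQ context: defer the callback
				 * to the delayed HPD workqueue instead of
				 * invoking it here.
				 */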
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(struct amdgpu_ip_block *ip_block);

/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < (unsigned long) mode->htotal * mode->vtotal)
			max_size = (unsigned long) mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}
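
/* Counterpart of amdgpu_dm_audio_component_bind(): clears the ops when the
 * audio component master unbinds.
 */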
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dc_context *ctx = adev->dm.dc->ctx;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	u32 i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	/* initialize register offsets for ASICs with runtime initialization available */
	if (dmub_srv->hw_funcs.init_reg_offsets)
		dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	memset(fb_info->fb[DMUB_WINDOW_SHARED_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_SHARED_STATE].size);

	/* Initialize hardware.
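	 * hw_params tells DMUB where the driver placed each framebuffer
	 * window and which optional features (instruction-const backdoor
	 * load, DPIA) to enable.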
	 */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->vm_manager.vram_base_offset;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 4):
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(4, 0, 1):
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 5, 1):
		hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	/* Keeping sanity checks off if
	 * DCN31 >= 4.0.59.0
	 * DCN314 >= 8.0.16.0
	 * Otherwise, turn on sanity checks
	 */
	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		if (adev->dm.dmcub_fw_version &&
		    adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
		    adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59))
			adev->dm.dc->debug.sanity_checks = true;
		break;
	case IP_VERSION(3, 1, 4):
		if (adev->dm.dmcub_fw_version &&
		    adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
		    adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16))
			adev->dm.dc->debug.sanity_checks = true;
		break;
	default:
		break;
	}

	return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;
	int r;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization.
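		 * DMUB state did not survive (e.g. across S3), so run the
		 * same init path as at driver load.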
		 */
		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
	}
}

static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	u64 pt_base;
	u32 logical_addr_low;
	u32 logical_addr_high;
	u32 agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	/* AGP aperture is disabled */
	if (agp_bot > agp_top) {
		logical_addr_low = adev->gmc.fb_start >> 18;
		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
				       AMD_APU_IS_RENOIR |
				       AMD_APU_IS_GREEN_SARDINE))
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
			 * workaround that increases the system aperture high address
			 * (by 1) to get rid of the VM fault and hardware hang.
			 */
			logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
		else
			logical_addr_high = adev->gmc.fb_end >> 18;
	} else {
		logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
				       AMD_APU_IS_RENOIR |
				       AMD_APU_IS_GREEN_SARDINE))
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
			 * workaround that increases the system aperture high address
			 * (by 1) to get rid of the VM fault and hardware hang.
			 */
			logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
		else
			logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
	}

	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
						   AMDGPU_GPU_PAGE_SHIFT);
	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
						  AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
						 AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
						AMDGPU_GPU_PAGE_SHIFT);
	page_table_base.high_part = upper_32_bits(pt_base);
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;

}
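
/* Force the connector to @force_state and kick a hotplug event so the
 * change is re-probed and propagated to userspace.
 */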
static void force_connector_state(
	struct amdgpu_dm_connector *aconnector,
	enum drm_connector_force force_state)
{
	struct drm_connector *connector = &aconnector->base;

	mutex_lock(&connector->dev->mode_config.mutex);
	aconnector->base.force = force_state;
	mutex_unlock(&connector->dev->mode_config.mutex);

	mutex_lock(&aconnector->hpd_lock);
	drm_kms_helper_connector_hotplug_event(connector);
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;
	union test_response test_response;

	memset(&test_response, 0, sizeof(test_response));

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
	    offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
		goto skip;
	}

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		dc_link_dp_handle_automated_test(dc_link);

		if (aconnector->timing_changed) {
			/* force connector disconnect and reconnect */
			force_connector_state(aconnector, DRM_FORCE_OFF);
			msleep(100);
			force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
		}

		test_response.bits.ACK = 1;

		core_link_write_dpcd(
			dc_link,
			DP_TEST_RESPONSE,
			&test_response.raw,
			sizeof(test_response));
	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
		   dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
		   dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		/* offload_work->data comes from handle_hpd_rx_irq->
		 * schedule_hpd_rx_offload_work. This is deferred handling
		 * for an hpd short pulse. By this point the link status may
		 * have changed, so read the latest link status from the dpcd
		 * registers. If the link status is good, skip running link
		 * training again.
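		 * (dc_link_dp_handle_link_loss() is only called if the
		 * re-read status still shows link loss.)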
		 */
		union hpd_irq_data irq_data;

		memset(&irq_data, 0, sizeof(irq_data));

		/* before dc_link_dp_handle_link_loss, allow new link lost handle
		 * request be added to work queue if link lost at end of dc_link_
		 * dp_handle_link_loss
		 */
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);

		if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
		    dc_link_check_link_loss_status(dc_link, &irq_data))
			dc_link_dp_handle_link_loss(dc_link);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;


	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	for (i = 0; i < max_caps; i++) {
		if (hpd_rx_offload_wq[i].wq)
			destroy_workqueue(hpd_rx_offload_wq[i].wq);
	}
	kfree(hpd_rx_offload_wq);
	return NULL;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
		},
	},
	{}
	/* TODO: refactor this from a fixed table to a dynamic option */
};

static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
{
	const struct dmi_system_id *dmi_id;

	dm->aux_hpd_discon_quirk = false;

	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
	if (dmi_id) {
		dm->aux_hpd_discon_quirk = true;
		DRM_INFO("aux_hpd_discon_quirk attached\n");
	}
}

void*
dm_allocate_gpu_mem(
		struct amdgpu_device *adev,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	struct dal_allocation *da;
	u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
		AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
	int ret;

	da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
	if (!da)
		return NULL;

	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				      domain, &da->bo,
				      &da->gpu_addr, &da->cpu_ptr);

	*addr = da->gpu_addr;

	if (ret) {
		kfree(da);
		return NULL;
	}

	/* add da to list in dm */
	list_add(&da->list, &adev->dm.da_list);

	return da->cpu_ptr;
}

void
dm_free_gpu_mem(
		struct amdgpu_device *adev,
		enum dc_gpu_mem_alloc_type type,
		void *pvMem)
{
	struct dal_allocation *da;

	/* walk the da list in DM */
	list_for_each_entry(da, &adev->dm.da_list, list) {
		if (pvMem == da->cpu_ptr) {
			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
			list_del(&da->list);
			kfree(da);
			break;
		}
	}

}

static enum dmub_status
dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
				 enum dmub_gpint_command command_code,
				 uint16_t param,
				 uint32_t timeout_us)
{
	union dmub_gpint_data_register reg, test;
	uint32_t i;

	/* Assume that VBIOS DMUB is ready to take commands */

	reg.bits.status = 1;
	reg.bits.command_code = command_code;
	reg.bits.param = param;

	cgs_write_register(adev->dm.cgs_device, 0x34c0 + 0x01f8, reg.all);

	for (i = 0; i < timeout_us; ++i) {
		udelay(1);

		/* Check if our GPINT got acked */
		reg.bits.status = 0;
		test = (union dmub_gpint_data_register)
			cgs_read_register(adev->dm.cgs_device, 0x34c0 + 0x01f8);

		if (test.all == reg.all)
			return DMUB_STATUS_OK;
	}

	return DMUB_STATUS_TIMEOUT;
}

static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
{
	struct dml2_soc_bb *bb;
	long long addr;
	int i = 0;
	uint16_t chunk;
	enum dmub_gpint_command send_addrs[] = {
		DMUB_GPINT__SET_BB_ADDR_WORD0,
		DMUB_GPINT__SET_BB_ADDR_WORD1,
		DMUB_GPINT__SET_BB_ADDR_WORD2,
		DMUB_GPINT__SET_BB_ADDR_WORD3,
	};
	enum dmub_status ret;

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(4, 0, 1):
		break;
	default:
		return NULL;
	}

	bb = dm_allocate_gpu_mem(adev,
				 DC_MEM_ALLOC_TYPE_GART,
				 sizeof(struct dml2_soc_bb),
				 &addr);
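	/* GART-backed so DMUB can copy the bounding box into memory the
	 * driver then reads back through the CPU mapping.
	 */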
1799 if (!bb) 1800 return NULL; 1801 1802 for (i = 0; i < 4; i++) { 1803 /* Extract 16-bit chunk */ 1804 chunk = ((uint64_t) addr >> (i * 16)) & 0xFFFF; 1805 /* Send the chunk */ 1806 ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000); 1807 if (ret != DMUB_STATUS_OK) 1808 goto free_bb; 1809 } 1810 1811 /* Now ask DMUB to copy the bb */ 1812 ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000); 1813 if (ret != DMUB_STATUS_OK) 1814 goto free_bb; 1815 1816 return bb; 1817 1818 free_bb: 1819 dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb); 1820 return NULL; 1821 1822 } 1823 1824 static enum dmub_ips_disable_type dm_get_default_ips_mode( 1825 struct amdgpu_device *adev) 1826 { 1827 enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE; 1828 1829 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1830 case IP_VERSION(3, 5, 0): 1831 /* 1832 * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to 1833 * cause a hard hang. A fix exists for newer PMFW. 1834 * 1835 * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest 1836 * IPS state in all cases, except for s0ix and all displays off (DPMS), 1837 * where IPS2 is allowed. 1838 * 1839 * When checking pmfw version, use the major and minor only. 1840 */ 1841 if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300) 1842 ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; 1843 else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0)) 1844 /* 1845 * Other ASICs with DCN35 that have residency issues with 1846 * IPS2 in idle. 1847 * We want them to use IPS2 only in display off cases. 1848 */ 1849 ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; 1850 break; 1851 case IP_VERSION(3, 5, 1): 1852 ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; 1853 break; 1854 default: 1855 /* ASICs older than DCN35 do not have IPSs */ 1856 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0)) 1857 ret = DMUB_IPS_DISABLE_ALL; 1858 break; 1859 } 1860 1861 return ret; 1862 } 1863 1864 static int amdgpu_dm_init(struct amdgpu_device *adev) 1865 { 1866 struct dc_init_data init_data; 1867 struct dc_callback_init init_params; 1868 int r; 1869 1870 adev->dm.ddev = adev_to_drm(adev); 1871 adev->dm.adev = adev; 1872 1873 /* Zero all the fields */ 1874 memset(&init_data, 0, sizeof(init_data)); 1875 memset(&init_params, 0, sizeof(init_params)); 1876 1877 mutex_init(&adev->dm.dpia_aux_lock); 1878 mutex_init(&adev->dm.dc_lock); 1879 mutex_init(&adev->dm.audio_lock); 1880 1881 if (amdgpu_dm_irq_init(adev)) { 1882 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); 1883 goto error; 1884 } 1885 1886 init_data.asic_id.chip_family = adev->family; 1887 1888 init_data.asic_id.pci_revision_id = adev->pdev->revision; 1889 init_data.asic_id.hw_internal_rev = adev->external_rev_id; 1890 init_data.asic_id.chip_id = adev->pdev->device; 1891 1892 init_data.asic_id.vram_width = adev->gmc.vram_width; 1893 /* TODO: initialize init_data.asic_id.vram_type here!!!! 
*/ 1894 init_data.asic_id.atombios_base_address = 1895 adev->mode_info.atom_context->bios; 1896 1897 init_data.driver = adev; 1898 1899 /* cgs_device was created in dm_sw_init() */ 1900 init_data.cgs_device = adev->dm.cgs_device; 1901 1902 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; 1903 1904 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1905 case IP_VERSION(2, 1, 0): 1906 switch (adev->dm.dmcub_fw_version) { 1907 case 0: /* development */ 1908 case 0x1: /* linux-firmware.git hash 6d9f399 */ 1909 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ 1910 init_data.flags.disable_dmcu = false; 1911 break; 1912 default: 1913 init_data.flags.disable_dmcu = true; 1914 } 1915 break; 1916 case IP_VERSION(2, 0, 3): 1917 init_data.flags.disable_dmcu = true; 1918 break; 1919 default: 1920 break; 1921 } 1922 1923 /* APUs support S/G display by default, except: 1924 * ASICs before Carrizo, 1925 * RAVEN1 (users reported stability issues) 1926 */ 1927 1928 if (adev->asic_type < CHIP_CARRIZO) { 1929 init_data.flags.gpu_vm_support = false; 1930 } else if (adev->asic_type == CHIP_RAVEN) { 1931 if (adev->apu_flags & AMD_APU_IS_RAVEN) 1932 init_data.flags.gpu_vm_support = false; 1933 else 1934 init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0); 1935 } else { 1936 if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3)) 1937 init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1); 1938 else 1939 init_data.flags.gpu_vm_support = 1940 (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); 1941 } 1942 1943 adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support; 1944 1945 if (amdgpu_dc_feature_mask & DC_FBC_MASK) 1946 init_data.flags.fbc_support = true; 1947 1948 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK) 1949 init_data.flags.multi_mon_pp_mclk_switch = true; 1950 1951 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK) 1952 init_data.flags.disable_fractional_pwm = true; 1953 1954 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING) 1955 init_data.flags.edp_no_power_sequencing = true; 1956 1957 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A) 1958 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true; 1959 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0) 1960 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; 1961 1962 init_data.flags.seamless_boot_edp_requested = false; 1963 1964 if (amdgpu_device_seamless_boot_supported(adev)) { 1965 init_data.flags.seamless_boot_edp_requested = true; 1966 init_data.flags.allow_seamless_boot_optimization = true; 1967 DRM_INFO("Seamless boot condition check passed\n"); 1968 } 1969 1970 init_data.flags.enable_mipi_converter_optimization = true; 1971 1972 init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0]; 1973 init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0]; 1974 init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0]; 1975 1976 if (amdgpu_dc_debug_mask & DC_DISABLE_IPS) 1977 init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL; 1978 else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC) 1979 init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC; 1980 else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC) 1981 init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; 1982 else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE) 1983 init_data.flags.disable_ips = DMUB_IPS_ENABLE; 1984 else 1985 init_data.flags.disable_ips = dm_get_default_ips_mode(adev); 1986 1987 init_data.flags.disable_ips_in_vpb = 0; 1988 1989 /* Enable DWB for tested platforms
only */ 1990 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) 1991 init_data.num_virtual_links = 1; 1992 1993 retrieve_dmi_info(&adev->dm); 1994 1995 if (adev->dm.bb_from_dmub) 1996 init_data.bb_from_dmub = adev->dm.bb_from_dmub; 1997 else 1998 init_data.bb_from_dmub = NULL; 1999 2000 /* Display Core create. */ 2001 adev->dm.dc = dc_create(&init_data); 2002 2003 if (adev->dm.dc) { 2004 DRM_INFO("Display Core v%s initialized on %s\n", DC_VER, 2005 dce_version_to_string(adev->dm.dc->ctx->dce_version)); 2006 } else { 2007 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER); 2008 goto error; 2009 } 2010 2011 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) { 2012 adev->dm.dc->debug.force_single_disp_pipe_split = false; 2013 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; 2014 } 2015 2016 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) 2017 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; 2018 if (dm_should_disable_stutter(adev->pdev)) 2019 adev->dm.dc->debug.disable_stutter = true; 2020 2021 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) 2022 adev->dm.dc->debug.disable_stutter = true; 2023 2024 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) 2025 adev->dm.dc->debug.disable_dsc = true; 2026 2027 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) 2028 adev->dm.dc->debug.disable_clock_gate = true; 2029 2030 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH) 2031 adev->dm.dc->debug.force_subvp_mclk_switch = true; 2032 2033 if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) { 2034 adev->dm.dc->debug.using_dml2 = true; 2035 adev->dm.dc->debug.using_dml21 = true; 2036 } 2037 2038 adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; 2039 2040 /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ 2041 adev->dm.dc->debug.ignore_cable_id = true; 2042 2043 if (adev->dm.dc->caps.dp_hdmi21_pcon_support) 2044 DRM_INFO("DP-HDMI FRL PCON supported\n"); 2045 2046 r = dm_dmub_hw_init(adev); 2047 if (r) { 2048 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 2049 goto error; 2050 } 2051 2052 dc_hardware_init(adev->dm.dc); 2053 2054 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); 2055 if (!adev->dm.hpd_rx_offload_wq) { 2056 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); 2057 goto error; 2058 } 2059 2060 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { 2061 struct dc_phy_addr_space_config pa_config; 2062 2063 mmhub_read_system_context(adev, &pa_config); 2064 2065 // Call the DC init_memory func 2066 dc_setup_system_context(adev->dm.dc, &pa_config); 2067 } 2068 2069 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); 2070 if (!adev->dm.freesync_module) { 2071 DRM_ERROR( 2072 "amdgpu: failed to initialize freesync_module.\n"); 2073 } else 2074 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", 2075 adev->dm.freesync_module); 2076 2077 amdgpu_dm_init_color_mod(); 2078 2079 if (adev->dm.dc->caps.max_links > 0) { 2080 adev->dm.vblank_control_workqueue = 2081 create_singlethread_workqueue("dm_vblank_control_workqueue"); 2082 if (!adev->dm.vblank_control_workqueue) 2083 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); 2084 } 2085 2086 if (adev->dm.dc->caps.ips_support && 2087 adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) 2088 adev->dm.idle_workqueue = idle_create_workqueue(adev); 2089 2090 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { 2091 
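/*
 * HDCP content protection is driven through the PSP firmware, which DM
 * only supports on Raven (AMDGPU_FAMILY_RV) and newer ASICs; hence the
 * family check above.
 */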
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); 2092 2093 if (!adev->dm.hdcp_workqueue) 2094 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); 2095 else 2096 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); 2097 2098 dc_init_callbacks(adev->dm.dc, &init_params); 2099 } 2100 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 2101 init_completion(&adev->dm.dmub_aux_transfer_done); 2102 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); 2103 if (!adev->dm.dmub_notify) { 2104 DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify"); 2105 goto error; 2106 } 2107 2108 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); 2109 if (!adev->dm.delayed_hpd_wq) { 2110 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); 2111 goto error; 2112 } 2113 2114 amdgpu_dm_outbox_init(adev); 2115 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, 2116 dmub_aux_setconfig_callback, false)) { 2117 DRM_ERROR("amdgpu: failed to register dmub aux callback"); 2118 goto error; 2119 } 2120 /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. 2121 * It is expected that DMUB will resend any pending notifications at this point. Note 2122 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to 2123 * align with the legacy interface initialization sequence. Connection status will be 2124 * proactively detected once in amdgpu_dm_initialize_drm_device. 2125 */ 2126 dc_enable_dmub_outbox(adev->dm.dc); 2127 2128 /* DPIA trace goes to dmesg logs only if outbox is enabled */ 2129 if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE) 2130 dc_dmub_srv_enable_dpia_trace(adev->dm.dc); 2131 } 2132 2133 if (amdgpu_dm_initialize_drm_device(adev)) { 2134 DRM_ERROR( 2135 "amdgpu: failed to initialize sw for display support.\n"); 2136 goto error; 2137 } 2138 2139 /* create fake encoders for MST */ 2140 dm_dp_create_fake_mst_encoders(adev); 2141 2142 /* TODO: Add_display_info?
*/ 2143 2144 /* TODO use dynamic cursor width */ 2145 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; 2146 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; 2147 2148 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { 2149 DRM_ERROR( 2150 "amdgpu: failed to initialize sw for display support.\n"); 2151 goto error; 2152 } 2153 2154 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 2155 adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev); 2156 if (!adev->dm.secure_display_ctxs) 2157 DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n"); 2158 #endif 2159 2160 DRM_DEBUG_DRIVER("KMS initialized.\n"); 2161 2162 return 0; 2163 error: 2164 amdgpu_dm_fini(adev); 2165 2166 return -EINVAL; 2167 } 2168 2169 static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block) 2170 { 2171 struct amdgpu_device *adev = ip_block->adev; 2172 2173 amdgpu_dm_audio_fini(adev); 2174 2175 return 0; 2176 } 2177 2178 static void amdgpu_dm_fini(struct amdgpu_device *adev) 2179 { 2180 int i; 2181 2182 if (adev->dm.vblank_control_workqueue) { 2183 destroy_workqueue(adev->dm.vblank_control_workqueue); 2184 adev->dm.vblank_control_workqueue = NULL; 2185 } 2186 2187 if (adev->dm.idle_workqueue) { 2188 if (adev->dm.idle_workqueue->running) { 2189 adev->dm.idle_workqueue->enable = false; 2190 flush_work(&adev->dm.idle_workqueue->work); 2191 } 2192 2193 kfree(adev->dm.idle_workqueue); 2194 adev->dm.idle_workqueue = NULL; 2195 } 2196 2197 amdgpu_dm_destroy_drm_device(&adev->dm); 2198 2199 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 2200 if (adev->dm.secure_display_ctxs) { 2201 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2202 if (adev->dm.secure_display_ctxs[i].crtc) { 2203 flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); 2204 flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); 2205 } 2206 } 2207 kfree(adev->dm.secure_display_ctxs); 2208 adev->dm.secure_display_ctxs = NULL; 2209 } 2210 #endif 2211 if (adev->dm.hdcp_workqueue) { 2212 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue); 2213 adev->dm.hdcp_workqueue = NULL; 2214 } 2215 2216 if (adev->dm.dc) { 2217 dc_deinit_callbacks(adev->dm.dc); 2218 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); 2219 if (dc_enable_dmub_notifications(adev->dm.dc)) { 2220 kfree(adev->dm.dmub_notify); 2221 adev->dm.dmub_notify = NULL; 2222 destroy_workqueue(adev->dm.delayed_hpd_wq); 2223 adev->dm.delayed_hpd_wq = NULL; 2224 } 2225 } 2226 2227 if (adev->dm.dmub_bo) 2228 amdgpu_bo_free_kernel(&adev->dm.dmub_bo, 2229 &adev->dm.dmub_bo_gpu_addr, 2230 &adev->dm.dmub_bo_cpu_addr); 2231 2232 if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) { 2233 for (i = 0; i < adev->dm.dc->caps.max_links; i++) { 2234 if (adev->dm.hpd_rx_offload_wq[i].wq) { 2235 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); 2236 adev->dm.hpd_rx_offload_wq[i].wq = NULL; 2237 } 2238 } 2239 2240 kfree(adev->dm.hpd_rx_offload_wq); 2241 adev->dm.hpd_rx_offload_wq = NULL; 2242 } 2243 2244 /* DC Destroy TODO: Replace destroy DAL */ 2245 if (adev->dm.dc) 2246 dc_destroy(&adev->dm.dc); 2247 /* 2248 * TODO: pageflip, vlank interrupt 2249 * 2250 * amdgpu_dm_irq_fini(adev); 2251 */ 2252 2253 if (adev->dm.cgs_device) { 2254 amdgpu_cgs_destroy_device(adev->dm.cgs_device); 2255 adev->dm.cgs_device = NULL; 2256 } 2257 if (adev->dm.freesync_module) { 2258 mod_freesync_destroy(adev->dm.freesync_module); 2259 adev->dm.freesync_module = NULL; 2260 } 2261 2262 mutex_destroy(&adev->dm.audio_lock); 
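/*
 * Tear down the remaining locks in the reverse order of their creation
 * in amdgpu_dm_init().
 */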
2263 mutex_destroy(&adev->dm.dc_lock); 2264 mutex_destroy(&adev->dm.dpia_aux_lock); 2265 } 2266 2267 static int load_dmcu_fw(struct amdgpu_device *adev) 2268 { 2269 const char *fw_name_dmcu = NULL; 2270 int r; 2271 const struct dmcu_firmware_header_v1_0 *hdr; 2272 2273 switch (adev->asic_type) { 2274 #if defined(CONFIG_DRM_AMD_DC_SI) 2275 case CHIP_TAHITI: 2276 case CHIP_PITCAIRN: 2277 case CHIP_VERDE: 2278 case CHIP_OLAND: 2279 #endif 2280 case CHIP_BONAIRE: 2281 case CHIP_HAWAII: 2282 case CHIP_KAVERI: 2283 case CHIP_KABINI: 2284 case CHIP_MULLINS: 2285 case CHIP_TONGA: 2286 case CHIP_FIJI: 2287 case CHIP_CARRIZO: 2288 case CHIP_STONEY: 2289 case CHIP_POLARIS11: 2290 case CHIP_POLARIS10: 2291 case CHIP_POLARIS12: 2292 case CHIP_VEGAM: 2293 case CHIP_VEGA10: 2294 case CHIP_VEGA12: 2295 case CHIP_VEGA20: 2296 return 0; 2297 case CHIP_NAVI12: 2298 fw_name_dmcu = FIRMWARE_NAVI12_DMCU; 2299 break; 2300 case CHIP_RAVEN: 2301 if (ASICREV_IS_PICASSO(adev->external_rev_id)) 2302 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 2303 else if (ASICREV_IS_RAVEN2(adev->external_rev_id)) 2304 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 2305 else 2306 return 0; 2307 break; 2308 default: 2309 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2310 case IP_VERSION(2, 0, 2): 2311 case IP_VERSION(2, 0, 3): 2312 case IP_VERSION(2, 0, 0): 2313 case IP_VERSION(2, 1, 0): 2314 case IP_VERSION(3, 0, 0): 2315 case IP_VERSION(3, 0, 2): 2316 case IP_VERSION(3, 0, 3): 2317 case IP_VERSION(3, 0, 1): 2318 case IP_VERSION(3, 1, 2): 2319 case IP_VERSION(3, 1, 3): 2320 case IP_VERSION(3, 1, 4): 2321 case IP_VERSION(3, 1, 5): 2322 case IP_VERSION(3, 1, 6): 2323 case IP_VERSION(3, 2, 0): 2324 case IP_VERSION(3, 2, 1): 2325 case IP_VERSION(3, 5, 0): 2326 case IP_VERSION(3, 5, 1): 2327 case IP_VERSION(4, 0, 1): 2328 return 0; 2329 default: 2330 break; 2331 } 2332 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 2333 return -EINVAL; 2334 } 2335 2336 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 2337 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n"); 2338 return 0; 2339 } 2340 2341 r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, "%s", fw_name_dmcu); 2342 if (r == -ENODEV) { 2343 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ 2344 DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); 2345 adev->dm.fw_dmcu = NULL; 2346 return 0; 2347 } 2348 if (r) { 2349 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", 2350 fw_name_dmcu); 2351 amdgpu_ucode_release(&adev->dm.fw_dmcu); 2352 return r; 2353 } 2354 2355 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; 2356 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; 2357 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; 2358 adev->firmware.fw_size += 2359 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 2360 2361 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; 2362 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; 2363 adev->firmware.fw_size += 2364 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 2365 2366 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version); 2367 2368 DRM_DEBUG_KMS("PSP loading DMCU firmware\n"); 2369 2370 return 0; 2371 } 2372 2373 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) 2374 { 2375 struct amdgpu_device *adev = ctx; 2376 2377 return dm_read_reg(adev->dm.dc->ctx, address); 2378 } 2379 
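/*
 * The two register thunks around this point only adapt dm's dc_context
 * based accessors to the void-pointer callback signature that dmub_srv
 * expects; they are wired up through create_params.funcs in
 * dm_dmub_sw_init() below. A hypothetical tracing variant (a sketch
 * only, not used anywhere in the driver) would keep the same shape:
 */
static uint32_t __maybe_unused
amdgpu_dm_dmub_reg_read_traced(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;
	uint32_t value = dm_read_reg(adev->dm.dc->ctx, address);

	/* Log every DMUB-initiated register read; illustrative only. */
	DRM_DEBUG_DRIVER("dmub reg read 0x%08x -> 0x%08x\n", address, value);
	return value;
}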
2380 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, 2381 uint32_t value) 2382 { 2383 struct amdgpu_device *adev = ctx; 2384 2385 return dm_write_reg(adev->dm.dc->ctx, address, value); 2386 } 2387 2388 static int dm_dmub_sw_init(struct amdgpu_device *adev) 2389 { 2390 struct dmub_srv_create_params create_params; 2391 struct dmub_srv_region_params region_params; 2392 struct dmub_srv_region_info region_info; 2393 struct dmub_srv_memory_params memory_params; 2394 struct dmub_srv_fb_info *fb_info; 2395 struct dmub_srv *dmub_srv; 2396 const struct dmcub_firmware_header_v1_0 *hdr; 2397 enum dmub_asic dmub_asic; 2398 enum dmub_status status; 2399 static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = { 2400 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST 2401 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK 2402 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA 2403 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS 2404 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX 2405 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF 2406 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE 2407 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM 2408 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE 2409 }; 2410 int r; 2411 2412 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2413 case IP_VERSION(2, 1, 0): 2414 dmub_asic = DMUB_ASIC_DCN21; 2415 break; 2416 case IP_VERSION(3, 0, 0): 2417 dmub_asic = DMUB_ASIC_DCN30; 2418 break; 2419 case IP_VERSION(3, 0, 1): 2420 dmub_asic = DMUB_ASIC_DCN301; 2421 break; 2422 case IP_VERSION(3, 0, 2): 2423 dmub_asic = DMUB_ASIC_DCN302; 2424 break; 2425 case IP_VERSION(3, 0, 3): 2426 dmub_asic = DMUB_ASIC_DCN303; 2427 break; 2428 case IP_VERSION(3, 1, 2): 2429 case IP_VERSION(3, 1, 3): 2430 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; 2431 break; 2432 case IP_VERSION(3, 1, 4): 2433 dmub_asic = DMUB_ASIC_DCN314; 2434 break; 2435 case IP_VERSION(3, 1, 5): 2436 dmub_asic = DMUB_ASIC_DCN315; 2437 break; 2438 case IP_VERSION(3, 1, 6): 2439 dmub_asic = DMUB_ASIC_DCN316; 2440 break; 2441 case IP_VERSION(3, 2, 0): 2442 dmub_asic = DMUB_ASIC_DCN32; 2443 break; 2444 case IP_VERSION(3, 2, 1): 2445 dmub_asic = DMUB_ASIC_DCN321; 2446 break; 2447 case IP_VERSION(3, 5, 0): 2448 case IP_VERSION(3, 5, 1): 2449 dmub_asic = DMUB_ASIC_DCN35; 2450 break; 2451 case IP_VERSION(4, 0, 1): 2452 dmub_asic = DMUB_ASIC_DCN401; 2453 break; 2454 2455 default: 2456 /* ASIC doesn't support DMUB. 
*/ 2457 return 0; 2458 } 2459 2460 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; 2461 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); 2462 2463 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 2464 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = 2465 AMDGPU_UCODE_ID_DMCUB; 2466 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = 2467 adev->dm.dmub_fw; 2468 adev->firmware.fw_size += 2469 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); 2470 2471 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", 2472 adev->dm.dmcub_fw_version); 2473 } 2474 2475 2476 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); 2477 dmub_srv = adev->dm.dmub_srv; 2478 2479 if (!dmub_srv) { 2480 DRM_ERROR("Failed to allocate DMUB service!\n"); 2481 return -ENOMEM; 2482 } 2483 2484 memset(&create_params, 0, sizeof(create_params)); 2485 create_params.user_ctx = adev; 2486 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; 2487 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; 2488 create_params.asic = dmub_asic; 2489 2490 /* Create the DMUB service. */ 2491 status = dmub_srv_create(dmub_srv, &create_params); 2492 if (status != DMUB_STATUS_OK) { 2493 DRM_ERROR("Error creating DMUB service: %d\n", status); 2494 return -EINVAL; 2495 } 2496 2497 /* Calculate the size of all the regions for the DMUB service. */ 2498 memset(®ion_params, 0, sizeof(region_params)); 2499 2500 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - 2501 PSP_HEADER_BYTES - PSP_FOOTER_BYTES; 2502 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); 2503 region_params.vbios_size = adev->bios_size; 2504 region_params.fw_bss_data = region_params.bss_data_size ? 2505 adev->dm.dmub_fw->data + 2506 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2507 le32_to_cpu(hdr->inst_const_bytes) : NULL; 2508 region_params.fw_inst_const = 2509 adev->dm.dmub_fw->data + 2510 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2511 PSP_HEADER_BYTES; 2512 region_params.window_memory_type = window_memory_type; 2513 2514 status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, 2515 ®ion_info); 2516 2517 if (status != DMUB_STATUS_OK) { 2518 DRM_ERROR("Error calculating DMUB region info: %d\n", status); 2519 return -EINVAL; 2520 } 2521 2522 /* 2523 * Allocate a framebuffer based on the total size of all the regions. 2524 * TODO: Move this into GART. 2525 */ 2526 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, 2527 AMDGPU_GEM_DOMAIN_VRAM | 2528 AMDGPU_GEM_DOMAIN_GTT, 2529 &adev->dm.dmub_bo, 2530 &adev->dm.dmub_bo_gpu_addr, 2531 &adev->dm.dmub_bo_cpu_addr); 2532 if (r) 2533 return r; 2534 2535 /* Rebase the regions on the framebuffer address. 
*/ 2536 memset(&memory_params, 0, sizeof(memory_params)); 2537 memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr; 2538 memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr; 2539 memory_params.region_info = ®ion_info; 2540 memory_params.window_memory_type = window_memory_type; 2541 2542 adev->dm.dmub_fb_info = 2543 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); 2544 fb_info = adev->dm.dmub_fb_info; 2545 2546 if (!fb_info) { 2547 DRM_ERROR( 2548 "Failed to allocate framebuffer info for DMUB service!\n"); 2549 return -ENOMEM; 2550 } 2551 2552 status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info); 2553 if (status != DMUB_STATUS_OK) { 2554 DRM_ERROR("Error calculating DMUB FB info: %d\n", status); 2555 return -EINVAL; 2556 } 2557 2558 adev->dm.bb_from_dmub = dm_dmub_get_vbios_bounding_box(adev); 2559 2560 return 0; 2561 } 2562 2563 static int dm_sw_init(struct amdgpu_ip_block *ip_block) 2564 { 2565 struct amdgpu_device *adev = ip_block->adev; 2566 int r; 2567 2568 adev->dm.cgs_device = amdgpu_cgs_create_device(adev); 2569 2570 if (!adev->dm.cgs_device) { 2571 DRM_ERROR("amdgpu: failed to create cgs device.\n"); 2572 return -EINVAL; 2573 } 2574 2575 /* Moved from dm init since we need to use allocations for storing bounding box data */ 2576 INIT_LIST_HEAD(&adev->dm.da_list); 2577 2578 r = dm_dmub_sw_init(adev); 2579 if (r) 2580 return r; 2581 2582 return load_dmcu_fw(adev); 2583 } 2584 2585 static int dm_sw_fini(struct amdgpu_ip_block *ip_block) 2586 { 2587 struct amdgpu_device *adev = ip_block->adev; 2588 struct dal_allocation *da; 2589 2590 list_for_each_entry(da, &adev->dm.da_list, list) { 2591 if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) { 2592 amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); 2593 list_del(&da->list); 2594 kfree(da); 2595 adev->dm.bb_from_dmub = NULL; 2596 break; 2597 } 2598 } 2599 2600 2601 kfree(adev->dm.dmub_fb_info); 2602 adev->dm.dmub_fb_info = NULL; 2603 2604 if (adev->dm.dmub_srv) { 2605 dmub_srv_destroy(adev->dm.dmub_srv); 2606 kfree(adev->dm.dmub_srv); 2607 adev->dm.dmub_srv = NULL; 2608 } 2609 2610 amdgpu_ucode_release(&adev->dm.dmub_fw); 2611 amdgpu_ucode_release(&adev->dm.fw_dmcu); 2612 2613 return 0; 2614 } 2615 2616 static int detect_mst_link_for_all_connectors(struct drm_device *dev) 2617 { 2618 struct amdgpu_dm_connector *aconnector; 2619 struct drm_connector *connector; 2620 struct drm_connector_list_iter iter; 2621 int ret = 0; 2622 2623 drm_connector_list_iter_begin(dev, &iter); 2624 drm_for_each_connector_iter(connector, &iter) { 2625 2626 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 2627 continue; 2628 2629 aconnector = to_amdgpu_dm_connector(connector); 2630 if (aconnector->dc_link->type == dc_connection_mst_branch && 2631 aconnector->mst_mgr.aux) { 2632 drm_dbg_kms(dev, "DM_MST: starting TM on aconnector: %p [id: %d]\n", 2633 aconnector, 2634 aconnector->base.base.id); 2635 2636 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); 2637 if (ret < 0) { 2638 drm_err(dev, "DM_MST: Failed to start MST\n"); 2639 aconnector->dc_link->type = 2640 dc_connection_single; 2641 ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, 2642 aconnector->dc_link); 2643 break; 2644 } 2645 } 2646 } 2647 drm_connector_list_iter_end(&iter); 2648 2649 return ret; 2650 } 2651 2652 static int dm_late_init(struct amdgpu_ip_block *ip_block) 2653 { 2654 struct amdgpu_device *adev = ip_block->adev; 2655 2656 struct dmcu_iram_parameters params; 2657 unsigned int linear_lut[16]; 2658 int i; 2659 struct 
dmcu *dmcu = NULL; 2660 2661 dmcu = adev->dm.dc->res_pool->dmcu; 2662 2663 for (i = 0; i < 16; i++) 2664 linear_lut[i] = 0xFFFF * i / 15; 2665 2666 params.set = 0; 2667 params.backlight_ramping_override = false; 2668 params.backlight_ramping_start = 0xCCCC; 2669 params.backlight_ramping_reduction = 0xCCCCCCCC; 2670 params.backlight_lut_array_size = 16; 2671 params.backlight_lut_array = linear_lut; 2672 2673 /* Min backlight level after ABM reduction, Don't allow below 1% 2674 * 0xFFFF x 0.01 = 0x28F 2675 */ 2676 params.min_abm_backlight = 0x28F; 2677 /* In the case where abm is implemented on dmcub, 2678 * dmcu object will be null. 2679 * ABM 2.4 and up are implemented on dmcub. 2680 */ 2681 if (dmcu) { 2682 if (!dmcu_load_iram(dmcu, params)) 2683 return -EINVAL; 2684 } else if (adev->dm.dc->ctx->dmub_srv) { 2685 struct dc_link *edp_links[MAX_NUM_EDP]; 2686 int edp_num; 2687 2688 dc_get_edp_links(adev->dm.dc, edp_links, &edp_num); 2689 for (i = 0; i < edp_num; i++) { 2690 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i)) 2691 return -EINVAL; 2692 } 2693 } 2694 2695 return detect_mst_link_for_all_connectors(adev_to_drm(adev)); 2696 } 2697 2698 static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr) 2699 { 2700 u8 buf[UUID_SIZE]; 2701 guid_t guid; 2702 int ret; 2703 2704 mutex_lock(&mgr->lock); 2705 if (!mgr->mst_primary) 2706 goto out_fail; 2707 2708 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { 2709 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2710 goto out_fail; 2711 } 2712 2713 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2714 DP_MST_EN | 2715 DP_UP_REQ_EN | 2716 DP_UPSTREAM_IS_SRC); 2717 if (ret < 0) { 2718 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); 2719 goto out_fail; 2720 } 2721 2722 /* Some hubs forget their guids after they resume */ 2723 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf)); 2724 if (ret != sizeof(buf)) { 2725 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2726 goto out_fail; 2727 } 2728 2729 import_guid(&guid, buf); 2730 2731 if (guid_is_null(&guid)) { 2732 guid_gen(&guid); 2733 export_guid(buf, &guid); 2734 2735 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, buf, sizeof(buf)); 2736 2737 if (ret != sizeof(buf)) { 2738 drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n"); 2739 goto out_fail; 2740 } 2741 } 2742 2743 guid_copy(&mgr->mst_primary->guid, &guid); 2744 2745 out_fail: 2746 mutex_unlock(&mgr->lock); 2747 } 2748 2749 static void s3_handle_mst(struct drm_device *dev, bool suspend) 2750 { 2751 struct amdgpu_dm_connector *aconnector; 2752 struct drm_connector *connector; 2753 struct drm_connector_list_iter iter; 2754 struct drm_dp_mst_topology_mgr *mgr; 2755 2756 drm_connector_list_iter_begin(dev, &iter); 2757 drm_for_each_connector_iter(connector, &iter) { 2758 2759 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 2760 continue; 2761 2762 aconnector = to_amdgpu_dm_connector(connector); 2763 if (aconnector->dc_link->type != dc_connection_mst_branch || 2764 aconnector->mst_root) 2765 continue; 2766 2767 mgr = &aconnector->mst_mgr; 2768 2769 if (suspend) { 2770 drm_dp_mst_topology_mgr_suspend(mgr); 2771 } else { 2772 /* if extended timeout is supported in hardware, 2773 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer 2774 * CTS 4.2.1.1 regression introduced by CTS specs requirement update. 
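 * The extended timeout is programmed unconditionally first; only when no
 * LTTPR is detected on the link is the AUX timeout dropped back to the
 * default period.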
2775 */ 2776 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); 2777 if (!dp_is_lttpr_present(aconnector->dc_link)) 2778 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); 2779 2780 /* TODO: move resume_mst_branch_status() into drm mst resume again 2781 * once topology probing work is pulled out from mst resume into mst 2782 * resume 2nd step. mst resume 2nd step should be called after old 2783 * state getting restored (i.e. drm_atomic_helper_resume()). 2784 */ 2785 resume_mst_branch_status(mgr); 2786 } 2787 } 2788 drm_connector_list_iter_end(&iter); 2789 } 2790 2791 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) 2792 { 2793 int ret = 0; 2794 2795 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends 2796 * on the Windows driver DC implementation. 2797 * For Navi1x, the clock settings of the DCN watermarks are fixed. The settings 2798 * should be passed to SMU during boot up and resume from S3. 2799 * Boot up: DC calculates the DCN watermark clock settings within dc_create, 2800 * dcn20_resource_construct, 2801 * then calls the pplib functions below to pass the settings to SMU: 2802 * smu_set_watermarks_for_clock_ranges 2803 * smu_set_watermarks_table 2804 * navi10_set_watermarks_table 2805 * smu_write_watermarks_table 2806 * 2807 * For Renoir, the clock settings of the DCN watermarks are also fixed values. 2808 * DC has implemented a different flow for the Windows driver: 2809 * dc_hardware_init / dc_set_power_state 2810 * dcn10_init_hw 2811 * notify_wm_ranges 2812 * set_wm_ranges 2813 * -- Linux 2814 * smu_set_watermarks_for_clock_ranges 2815 * renoir_set_watermarks_table 2816 * smu_write_watermarks_table 2817 * 2818 * For Linux, 2819 * dc_hardware_init -> amdgpu_dm_init 2820 * dc_set_power_state --> dm_resume 2821 * 2822 * Therefore, this function applies to Navi10/12/14 but not Renoir. 2823 * 2824 */ 2825 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2826 case IP_VERSION(2, 0, 2): 2827 case IP_VERSION(2, 0, 0): 2828 break; 2829 default: 2830 return 0; 2831 } 2832 2833 ret = amdgpu_dpm_write_watermarks_table(adev); 2834 if (ret) { 2835 DRM_ERROR("Failed to update WMTABLE!\n"); 2836 return ret; 2837 } 2838 2839 return 0; 2840 } 2841 2842 /** 2843 * dm_hw_init() - Initialize DC device 2844 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 2845 * 2846 * Initialize the &struct amdgpu_display_manager device. This involves calling 2847 * the initializers of each DM component, then populating the struct with them. 2848 * 2849 * Although the function implies hardware initialization, both hardware and 2850 * software are initialized here. Splitting them out to their relevant init 2851 * hooks is a future TODO item. 2852 * 2853 * Some notable things that are initialized here: 2854 * 2855 * - Display Core, both software and hardware 2856 * - DC modules that we need (freesync and color management) 2857 * - DRM software states 2858 * - Interrupt sources and handlers 2859 * - Vblank support 2860 * - Debug FS entries, if enabled 2861 */ 2862 static int dm_hw_init(struct amdgpu_ip_block *ip_block) 2863 { 2864 struct amdgpu_device *adev = ip_block->adev; 2865 int r; 2866 2867 /* Create DAL display manager */ 2868 r = amdgpu_dm_init(adev); 2869 if (r) 2870 return r; 2871 amdgpu_dm_hpd_init(adev); 2872 2873 return 0; 2874 } 2875 2876 /** 2877 * dm_hw_fini() - Teardown DC device 2878 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
2879 * 2880 * Teardown components within &struct amdgpu_display_manager that require 2881 * cleanup. This involves cleaning up the DRM device, DC, and any modules that 2882 * were loaded. Also flush IRQ workqueues and disable them. 2883 */ 2884 static int dm_hw_fini(struct amdgpu_ip_block *ip_block) 2885 { 2886 struct amdgpu_device *adev = ip_block->adev; 2887 2888 amdgpu_dm_hpd_fini(adev); 2889 2890 amdgpu_dm_irq_fini(adev); 2891 amdgpu_dm_fini(adev); 2892 return 0; 2893 } 2894 2895 2896 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, 2897 struct dc_state *state, bool enable) 2898 { 2899 enum dc_irq_source irq_source; 2900 struct amdgpu_crtc *acrtc; 2901 int rc = -EBUSY; 2902 int i = 0; 2903 2904 for (i = 0; i < state->stream_count; i++) { 2905 acrtc = get_crtc_by_otg_inst( 2906 adev, state->stream_status[i].primary_otg_inst); 2907 2908 if (acrtc && state->stream_status[i].plane_count != 0) { 2909 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; 2910 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; 2911 if (rc) 2912 DRM_WARN("Failed to %s pflip interrupts\n", 2913 enable ? "enable" : "disable"); 2914 2915 if (enable) { 2916 if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state))) 2917 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true); 2918 } else 2919 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); 2920 2921 if (rc) 2922 DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); 2923 2924 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; 2925 /* During gpu-reset we disable and then enable vblank irq, so 2926 * don't use amdgpu_irq_get/put() to avoid refcount change. 2927 */ 2928 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) 2929 DRM_WARN("Failed to %sable vblank interrupt\n", enable ? 
"en" : "dis"); 2930 } 2931 } 2932 2933 } 2934 2935 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) 2936 { 2937 struct dc_state *context = NULL; 2938 enum dc_status res = DC_ERROR_UNEXPECTED; 2939 int i; 2940 struct dc_stream_state *del_streams[MAX_PIPES]; 2941 int del_streams_count = 0; 2942 struct dc_commit_streams_params params = {}; 2943 2944 memset(del_streams, 0, sizeof(del_streams)); 2945 2946 context = dc_state_create_current_copy(dc); 2947 if (context == NULL) 2948 goto context_alloc_fail; 2949 2950 /* First remove from context all streams */ 2951 for (i = 0; i < context->stream_count; i++) { 2952 struct dc_stream_state *stream = context->streams[i]; 2953 2954 del_streams[del_streams_count++] = stream; 2955 } 2956 2957 /* Remove all planes for removed streams and then remove the streams */ 2958 for (i = 0; i < del_streams_count; i++) { 2959 if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) { 2960 res = DC_FAIL_DETACH_SURFACES; 2961 goto fail; 2962 } 2963 2964 res = dc_state_remove_stream(dc, context, del_streams[i]); 2965 if (res != DC_OK) 2966 goto fail; 2967 } 2968 2969 params.streams = context->streams; 2970 params.stream_count = context->stream_count; 2971 res = dc_commit_streams(dc, ¶ms); 2972 2973 fail: 2974 dc_state_release(context); 2975 2976 context_alloc_fail: 2977 return res; 2978 } 2979 2980 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) 2981 { 2982 int i; 2983 2984 if (dm->hpd_rx_offload_wq) { 2985 for (i = 0; i < dm->dc->caps.max_links; i++) 2986 flush_workqueue(dm->hpd_rx_offload_wq[i].wq); 2987 } 2988 } 2989 2990 static int dm_suspend(struct amdgpu_ip_block *ip_block) 2991 { 2992 struct amdgpu_device *adev = ip_block->adev; 2993 struct amdgpu_display_manager *dm = &adev->dm; 2994 int ret = 0; 2995 2996 if (amdgpu_in_reset(adev)) { 2997 mutex_lock(&dm->dc_lock); 2998 2999 dc_allow_idle_optimizations(adev->dm.dc, false); 3000 3001 dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state); 3002 3003 if (dm->cached_dc_state) 3004 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); 3005 3006 amdgpu_dm_commit_zero_streams(dm->dc); 3007 3008 amdgpu_dm_irq_suspend(adev); 3009 3010 hpd_rx_irq_work_suspend(dm); 3011 3012 return ret; 3013 } 3014 3015 WARN_ON(adev->dm.cached_state); 3016 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); 3017 if (IS_ERR(adev->dm.cached_state)) 3018 return PTR_ERR(adev->dm.cached_state); 3019 3020 s3_handle_mst(adev_to_drm(adev), true); 3021 3022 amdgpu_dm_irq_suspend(adev); 3023 3024 hpd_rx_irq_work_suspend(dm); 3025 3026 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); 3027 3028 if (dm->dc->caps.ips_support && adev->in_s0ix) 3029 dc_allow_idle_optimizations(dm->dc, true); 3030 3031 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); 3032 3033 return 0; 3034 } 3035 3036 struct drm_connector * 3037 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, 3038 struct drm_crtc *crtc) 3039 { 3040 u32 i; 3041 struct drm_connector_state *new_con_state; 3042 struct drm_connector *connector; 3043 struct drm_crtc *crtc_from_state; 3044 3045 for_each_new_connector_in_state(state, connector, new_con_state, i) { 3046 crtc_from_state = new_con_state->crtc; 3047 3048 if (crtc_from_state == crtc) 3049 return connector; 3050 } 3051 3052 return NULL; 3053 } 3054 3055 static void emulated_link_detect(struct dc_link *link) 3056 { 3057 struct dc_sink_init_data sink_init_data = { 0 }; 3058 struct 
display_sink_capability sink_caps = { 0 }; 3059 enum dc_edid_status edid_status; 3060 struct dc_context *dc_ctx = link->ctx; 3061 struct drm_device *dev = adev_to_drm(dc_ctx->driver_context); 3062 struct dc_sink *sink = NULL; 3063 struct dc_sink *prev_sink = NULL; 3064 3065 link->type = dc_connection_none; 3066 prev_sink = link->local_sink; 3067 3068 if (prev_sink) 3069 dc_sink_release(prev_sink); 3070 3071 switch (link->connector_signal) { 3072 case SIGNAL_TYPE_HDMI_TYPE_A: { 3073 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3074 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; 3075 break; 3076 } 3077 3078 case SIGNAL_TYPE_DVI_SINGLE_LINK: { 3079 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3080 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; 3081 break; 3082 } 3083 3084 case SIGNAL_TYPE_DVI_DUAL_LINK: { 3085 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3086 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; 3087 break; 3088 } 3089 3090 case SIGNAL_TYPE_LVDS: { 3091 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 3092 sink_caps.signal = SIGNAL_TYPE_LVDS; 3093 break; 3094 } 3095 3096 case SIGNAL_TYPE_EDP: { 3097 sink_caps.transaction_type = 3098 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 3099 sink_caps.signal = SIGNAL_TYPE_EDP; 3100 break; 3101 } 3102 3103 case SIGNAL_TYPE_DISPLAY_PORT: { 3104 sink_caps.transaction_type = 3105 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 3106 sink_caps.signal = SIGNAL_TYPE_VIRTUAL; 3107 break; 3108 } 3109 3110 default: 3111 drm_err(dev, "Invalid connector type! signal:%d\n", 3112 link->connector_signal); 3113 return; 3114 } 3115 3116 sink_init_data.link = link; 3117 sink_init_data.sink_signal = sink_caps.signal; 3118 3119 sink = dc_sink_create(&sink_init_data); 3120 if (!sink) { 3121 drm_err(dev, "Failed to create sink!\n"); 3122 return; 3123 } 3124 3125 /* dc_sink_create returns a new reference */ 3126 link->local_sink = sink; 3127 3128 edid_status = dm_helpers_read_local_edid( 3129 link->ctx, 3130 link, 3131 sink); 3132 3133 if (edid_status != EDID_OK) 3134 drm_err(dev, "Failed to read EDID\n"); 3135 3136 } 3137 3138 static void dm_gpureset_commit_state(struct dc_state *dc_state, 3139 struct amdgpu_display_manager *dm) 3140 { 3141 struct { 3142 struct dc_surface_update surface_updates[MAX_SURFACES]; 3143 struct dc_plane_info plane_infos[MAX_SURFACES]; 3144 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 3145 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 3146 struct dc_stream_update stream_update; 3147 } *bundle; 3148 int k, m; 3149 3150 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 3151 3152 if (!bundle) { 3153 drm_err(dm->ddev, "Failed to allocate update bundle\n"); 3154 goto cleanup; 3155 } 3156 3157 for (k = 0; k < dc_state->stream_count; k++) { 3158 bundle->stream_update.stream = dc_state->streams[k]; 3159 3160 for (m = 0; m < dc_state->stream_status->plane_count; m++) { 3161 bundle->surface_updates[m].surface = 3162 dc_state->stream_status->plane_states[m]; 3163 bundle->surface_updates[m].surface->force_full_update = 3164 true; 3165 } 3166 3167 update_planes_and_stream_adapter(dm->dc, 3168 UPDATE_TYPE_FULL, 3169 dc_state->stream_status->plane_count, 3170 dc_state->streams[k], 3171 &bundle->stream_update, 3172 bundle->surface_updates); 3173 } 3174 3175 cleanup: 3176 kfree(bundle); 3177 } 3178 3179 static int dm_resume(struct amdgpu_ip_block *ip_block) 3180 { 3181 struct amdgpu_device *adev = ip_block->adev; 3182 struct drm_device *ddev = adev_to_drm(adev); 3183 struct amdgpu_display_manager *dm = &adev->dm; 3184 struct 
amdgpu_dm_connector *aconnector; 3185 struct drm_connector *connector; 3186 struct drm_connector_list_iter iter; 3187 struct drm_crtc *crtc; 3188 struct drm_crtc_state *new_crtc_state; 3189 struct dm_crtc_state *dm_new_crtc_state; 3190 struct drm_plane *plane; 3191 struct drm_plane_state *new_plane_state; 3192 struct dm_plane_state *dm_new_plane_state; 3193 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); 3194 enum dc_connection_type new_connection_type = dc_connection_none; 3195 struct dc_state *dc_state; 3196 int i, r, j; 3197 struct dc_commit_streams_params commit_params = {}; 3198 3199 if (dm->dc->caps.ips_support) { 3200 dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false); 3201 } 3202 3203 if (amdgpu_in_reset(adev)) { 3204 dc_state = dm->cached_dc_state; 3205 3206 /* 3207 * The dc->current_state is backed up into dm->cached_dc_state 3208 * before we commit 0 streams. 3209 * 3210 * DC will clear link encoder assignments on the real state 3211 * but the changes won't propagate over to the copy we made 3212 * before the 0 streams commit. 3213 * 3214 * DC expects that link encoder assignments are *not* valid 3215 * when committing a state, so as a workaround we can copy 3216 * off of the current state. 3217 * 3218 * We lose the previous assignments, but we had already 3219 * commit 0 streams anyway. 3220 */ 3221 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); 3222 3223 r = dm_dmub_hw_init(adev); 3224 if (r) 3225 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 3226 3227 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); 3228 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 3229 3230 dc_resume(dm->dc); 3231 3232 amdgpu_dm_irq_resume_early(adev); 3233 3234 for (i = 0; i < dc_state->stream_count; i++) { 3235 dc_state->streams[i]->mode_changed = true; 3236 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { 3237 dc_state->stream_status[i].plane_states[j]->update_flags.raw 3238 = 0xffffffff; 3239 } 3240 } 3241 3242 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 3243 amdgpu_dm_outbox_init(adev); 3244 dc_enable_dmub_outbox(adev->dm.dc); 3245 } 3246 3247 commit_params.streams = dc_state->streams; 3248 commit_params.stream_count = dc_state->stream_count; 3249 dc_exit_ips_for_hw_access(dm->dc); 3250 WARN_ON(!dc_commit_streams(dm->dc, &commit_params)); 3251 3252 dm_gpureset_commit_state(dm->cached_dc_state, dm); 3253 3254 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); 3255 3256 dc_state_release(dm->cached_dc_state); 3257 dm->cached_dc_state = NULL; 3258 3259 amdgpu_dm_irq_resume_late(adev); 3260 3261 mutex_unlock(&dm->dc_lock); 3262 3263 return 0; 3264 } 3265 /* Recreate dc_state - DC invalidates it when setting power state to S3. */ 3266 dc_state_release(dm_state->context); 3267 dm_state->context = dc_state_create(dm->dc, NULL); 3268 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ 3269 3270 /* Before powering on DC we need to re-initialize DMUB. */ 3271 dm_dmub_hw_resume(adev); 3272 3273 /* Re-enable outbox interrupts for DPIA. 
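 * The outbox state was lost together with the rest of the DMUB hardware
 * state across suspend, so it must be re-armed before any DPIA
 * notifications can be delivered again.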
*/ 3274 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 3275 amdgpu_dm_outbox_init(adev); 3276 dc_enable_dmub_outbox(adev->dm.dc); 3277 } 3278 3279 /* power on hardware */ 3280 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); 3281 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 3282 3283 /* program HPD filter */ 3284 dc_resume(dm->dc); 3285 3286 /* 3287 * early enable HPD Rx IRQ, should be done before set mode as short 3288 * pulse interrupts are used for MST 3289 */ 3290 amdgpu_dm_irq_resume_early(adev); 3291 3292 /* On resume we need to rewrite the MSTM control bits to enable MST*/ 3293 s3_handle_mst(ddev, false); 3294 3295 /* Do detection*/ 3296 drm_connector_list_iter_begin(ddev, &iter); 3297 drm_for_each_connector_iter(connector, &iter) { 3298 3299 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 3300 continue; 3301 3302 aconnector = to_amdgpu_dm_connector(connector); 3303 3304 if (!aconnector->dc_link) 3305 continue; 3306 3307 /* 3308 * this is the case when traversing through already created end sink 3309 * MST connectors, should be skipped 3310 */ 3311 if (aconnector->mst_root) 3312 continue; 3313 3314 mutex_lock(&aconnector->hpd_lock); 3315 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 3316 DRM_ERROR("KMS: Failed to detect connector\n"); 3317 3318 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3319 emulated_link_detect(aconnector->dc_link); 3320 } else { 3321 mutex_lock(&dm->dc_lock); 3322 dc_exit_ips_for_hw_access(dm->dc); 3323 dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4); 3324 mutex_unlock(&dm->dc_lock); 3325 } 3326 3327 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 3328 aconnector->fake_enable = false; 3329 3330 if (aconnector->dc_sink) 3331 dc_sink_release(aconnector->dc_sink); 3332 aconnector->dc_sink = NULL; 3333 amdgpu_dm_update_connector_after_detect(aconnector); 3334 mutex_unlock(&aconnector->hpd_lock); 3335 } 3336 drm_connector_list_iter_end(&iter); 3337 3338 /* Force mode set in atomic commit */ 3339 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 3340 new_crtc_state->active_changed = true; 3341 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 3342 reset_freesync_config_for_crtc(dm_new_crtc_state); 3343 } 3344 3345 /* 3346 * atomic_check is expected to create the dc states. We need to release 3347 * them here, since they were duplicated as part of the suspend 3348 * procedure. 
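 * Releasing them here keeps the stream and plane refcounts balanced, as
 * the WARN_ONs below check.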
3349 */ 3350 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 3351 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 3352 if (dm_new_crtc_state->stream) { 3353 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); 3354 dc_stream_release(dm_new_crtc_state->stream); 3355 dm_new_crtc_state->stream = NULL; 3356 } 3357 dm_new_crtc_state->base.color_mgmt_changed = true; 3358 } 3359 3360 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { 3361 dm_new_plane_state = to_dm_plane_state(new_plane_state); 3362 if (dm_new_plane_state->dc_state) { 3363 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); 3364 dc_plane_state_release(dm_new_plane_state->dc_state); 3365 dm_new_plane_state->dc_state = NULL; 3366 } 3367 } 3368 3369 drm_atomic_helper_resume(ddev, dm->cached_state); 3370 3371 dm->cached_state = NULL; 3372 3373 /* Do MST topology probing after resuming the cached state */ 3374 drm_connector_list_iter_begin(ddev, &iter); 3375 drm_for_each_connector_iter(connector, &iter) { 3376 3377 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 3378 continue; 3379 3380 aconnector = to_amdgpu_dm_connector(connector); 3381 if (aconnector->dc_link->type != dc_connection_mst_branch || 3382 aconnector->mst_root) 3383 continue; 3384 3385 drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); 3386 } 3387 drm_connector_list_iter_end(&iter); 3388 3389 amdgpu_dm_irq_resume_late(adev); 3390 3391 amdgpu_dm_smu_write_watermarks_table(adev); 3392 3393 drm_kms_helper_hotplug_event(ddev); 3394 3395 return 0; 3396 } 3397 3398 /** 3399 * DOC: DM Lifecycle 3400 * 3401 * DM (and consequently DC) is registered in the amdgpu base driver as an IP 3402 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to 3403 * the base driver's device list to be initialized and torn down accordingly. 3404 * 3405 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
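 * (For DM these hooks are filled in by amdgpu_dm_funcs, defined just
 * below.)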
3406 */ 3407 3408 static const struct amd_ip_funcs amdgpu_dm_funcs = { 3409 .name = "dm", 3410 .early_init = dm_early_init, 3411 .late_init = dm_late_init, 3412 .sw_init = dm_sw_init, 3413 .sw_fini = dm_sw_fini, 3414 .early_fini = amdgpu_dm_early_fini, 3415 .hw_init = dm_hw_init, 3416 .hw_fini = dm_hw_fini, 3417 .suspend = dm_suspend, 3418 .resume = dm_resume, 3419 .is_idle = dm_is_idle, 3420 .wait_for_idle = dm_wait_for_idle, 3421 .check_soft_reset = dm_check_soft_reset, 3422 .soft_reset = dm_soft_reset, 3423 .set_clockgating_state = dm_set_clockgating_state, 3424 .set_powergating_state = dm_set_powergating_state, 3425 }; 3426 3427 const struct amdgpu_ip_block_version dm_ip_block = { 3428 .type = AMD_IP_BLOCK_TYPE_DCE, 3429 .major = 1, 3430 .minor = 0, 3431 .rev = 0, 3432 .funcs = &amdgpu_dm_funcs, 3433 }; 3434 3435 3436 /** 3437 * DOC: atomic 3438 * 3439 * *WIP* 3440 */ 3441 3442 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 3443 .fb_create = amdgpu_display_user_framebuffer_create, 3444 .get_format_info = amdgpu_dm_plane_get_format_info, 3445 .atomic_check = amdgpu_dm_atomic_check, 3446 .atomic_commit = drm_atomic_helper_commit, 3447 }; 3448 3449 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { 3450 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, 3451 .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, 3452 }; 3453 3454 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) 3455 { 3456 struct amdgpu_dm_backlight_caps *caps; 3457 struct drm_connector *conn_base; 3458 struct amdgpu_device *adev; 3459 struct drm_luminance_range_info *luminance_range; 3460 3461 if (aconnector->bl_idx == -1 || 3462 aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP) 3463 return; 3464 3465 conn_base = &aconnector->base; 3466 adev = drm_to_adev(conn_base->dev); 3467 3468 caps = &adev->dm.backlight_caps[aconnector->bl_idx]; 3469 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; 3470 caps->aux_support = false; 3471 3472 if (caps->ext_caps->bits.oled == 1 3473 /* 3474 * || 3475 * caps->ext_caps->bits.sdr_aux_backlight_control == 1 || 3476 * caps->ext_caps->bits.hdr_aux_backlight_control == 1 3477 */) 3478 caps->aux_support = true; 3479 3480 if (amdgpu_backlight == 0) 3481 caps->aux_support = false; 3482 else if (amdgpu_backlight == 1) 3483 caps->aux_support = true; 3484 if (caps->aux_support) 3485 aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX; 3486 3487 luminance_range = &conn_base->display_info.luminance_range; 3488 3489 if (luminance_range->max_luminance) { 3490 caps->aux_min_input_signal = luminance_range->min_luminance; 3491 caps->aux_max_input_signal = luminance_range->max_luminance; 3492 } else { 3493 caps->aux_min_input_signal = 0; 3494 caps->aux_max_input_signal = 512; 3495 } 3496 } 3497 3498 void amdgpu_dm_update_connector_after_detect( 3499 struct amdgpu_dm_connector *aconnector) 3500 { 3501 struct drm_connector *connector = &aconnector->base; 3502 struct drm_device *dev = connector->dev; 3503 struct dc_sink *sink; 3504 3505 /* MST handled by drm_mst framework */ 3506 if (aconnector->mst_mgr.mst_state == true) 3507 return; 3508 3509 sink = aconnector->dc_link->local_sink; 3510 if (sink) 3511 dc_sink_retain(sink); 3512 3513 /* 3514 * The EDID-managed connector gets its first update only in the mode_valid 3515 * hook; the connector sink is then set to either a fake or a physical sink, 3516 * depending on link status. Skip if already done during boot.
3517 */ 3518 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED 3519 && aconnector->dc_em_sink) { 3520 3521 /* 3522 * For S3 resume with headless use eml_sink to fake stream 3523 * because on resume connector->sink is set to NULL 3524 */ 3525 mutex_lock(&dev->mode_config.mutex); 3526 3527 if (sink) { 3528 if (aconnector->dc_sink) { 3529 amdgpu_dm_update_freesync_caps(connector, NULL); 3530 /* 3531 * retain and release below are used to 3532 * bump up refcount for sink because the link doesn't point 3533 * to it anymore after disconnect, so on next crtc to connector 3534 * reshuffle by UMD we will get into unwanted dc_sink release 3535 */ 3536 dc_sink_release(aconnector->dc_sink); 3537 } 3538 aconnector->dc_sink = sink; 3539 dc_sink_retain(aconnector->dc_sink); 3540 amdgpu_dm_update_freesync_caps(connector, 3541 aconnector->drm_edid); 3542 } else { 3543 amdgpu_dm_update_freesync_caps(connector, NULL); 3544 if (!aconnector->dc_sink) { 3545 aconnector->dc_sink = aconnector->dc_em_sink; 3546 dc_sink_retain(aconnector->dc_sink); 3547 } 3548 } 3549 3550 mutex_unlock(&dev->mode_config.mutex); 3551 3552 if (sink) 3553 dc_sink_release(sink); 3554 return; 3555 } 3556 3557 /* 3558 * TODO: temporary guard to look for proper fix 3559 * if this sink is MST sink, we should not do anything 3560 */ 3561 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 3562 dc_sink_release(sink); 3563 return; 3564 } 3565 3566 if (aconnector->dc_sink == sink) { 3567 /* 3568 * We got a DP short pulse (Link Loss, DP CTS, etc...). 3569 * Do nothing!! 3570 */ 3571 drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n", 3572 aconnector->connector_id); 3573 if (sink) 3574 dc_sink_release(sink); 3575 return; 3576 } 3577 3578 drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", 3579 aconnector->connector_id, aconnector->dc_sink, sink); 3580 3581 mutex_lock(&dev->mode_config.mutex); 3582 3583 /* 3584 * 1. Update status of the drm connector 3585 * 2. Send an event and let userspace tell us what to do 3586 */ 3587 if (sink) { 3588 /* 3589 * TODO: check if we still need the S3 mode update workaround. 3590 * If yes, put it here. 
3591 */ 3592 if (aconnector->dc_sink) { 3593 amdgpu_dm_update_freesync_caps(connector, NULL); 3594 dc_sink_release(aconnector->dc_sink); 3595 } 3596 3597 aconnector->dc_sink = sink; 3598 dc_sink_retain(aconnector->dc_sink); 3599 if (sink->dc_edid.length == 0) { 3600 aconnector->drm_edid = NULL; 3601 if (aconnector->dc_link->aux_mode) { 3602 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 3603 } 3604 } else { 3605 const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid; 3606 3607 aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length); 3608 drm_edid_connector_update(connector, aconnector->drm_edid); 3609 3610 if (aconnector->dc_link->aux_mode) 3611 drm_dp_cec_attach(&aconnector->dm_dp_aux.aux, 3612 connector->display_info.source_physical_address); 3613 } 3614 3615 if (!aconnector->timing_requested) { 3616 aconnector->timing_requested = 3617 kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); 3618 if (!aconnector->timing_requested) 3619 drm_err(dev, 3620 "failed to allocate aconnector->timing_requested\n"); 3621 } 3622 3623 amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid); 3624 update_connector_ext_caps(aconnector); 3625 } else { 3626 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 3627 amdgpu_dm_update_freesync_caps(connector, NULL); 3628 aconnector->num_modes = 0; 3629 dc_sink_release(aconnector->dc_sink); 3630 aconnector->dc_sink = NULL; 3631 drm_edid_free(aconnector->drm_edid); 3632 aconnector->drm_edid = NULL; 3633 kfree(aconnector->timing_requested); 3634 aconnector->timing_requested = NULL; 3635 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ 3636 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 3637 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 3638 } 3639 3640 mutex_unlock(&dev->mode_config.mutex); 3641 3642 update_subconnector_property(aconnector); 3643 3644 if (sink) 3645 dc_sink_release(sink); 3646 } 3647 3648 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) 3649 { 3650 struct drm_connector *connector = &aconnector->base; 3651 struct drm_device *dev = connector->dev; 3652 enum dc_connection_type new_connection_type = dc_connection_none; 3653 struct amdgpu_device *adev = drm_to_adev(dev); 3654 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 3655 struct dc *dc = aconnector->dc_link->ctx->dc; 3656 bool ret = false; 3657 3658 if (adev->dm.disable_hpd_irq) 3659 return; 3660 3661 /* 3662 * In case of failure, or for MST, there is no need to update the connector status 3663 * or notify the OS, since (in the MST case) MST does this in its own context.
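 * Everything below runs under aconnector->hpd_lock, so a second hotplug on
 * the same connector cannot race the link detection and sink update.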
3664 */ 3665 mutex_lock(&aconnector->hpd_lock); 3666 3667 if (adev->dm.hdcp_workqueue) { 3668 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 3669 dm_con_state->update_hdcp = true; 3670 } 3671 if (aconnector->fake_enable) 3672 aconnector->fake_enable = false; 3673 3674 aconnector->timing_changed = false; 3675 3676 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 3677 DRM_ERROR("KMS: Failed to detect connector\n"); 3678 3679 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3680 emulated_link_detect(aconnector->dc_link); 3681 3682 drm_modeset_lock_all(dev); 3683 dm_restore_drm_connector_state(dev, connector); 3684 drm_modeset_unlock_all(dev); 3685 3686 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3687 drm_kms_helper_connector_hotplug_event(connector); 3688 } else { 3689 mutex_lock(&adev->dm.dc_lock); 3690 dc_exit_ips_for_hw_access(dc); 3691 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 3692 mutex_unlock(&adev->dm.dc_lock); 3693 if (ret) { 3694 amdgpu_dm_update_connector_after_detect(aconnector); 3695 3696 drm_modeset_lock_all(dev); 3697 dm_restore_drm_connector_state(dev, connector); 3698 drm_modeset_unlock_all(dev); 3699 3700 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3701 drm_kms_helper_connector_hotplug_event(connector); 3702 } 3703 } 3704 mutex_unlock(&aconnector->hpd_lock); 3705 3706 } 3707 3708 static void handle_hpd_irq(void *param) 3709 { 3710 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3711 3712 handle_hpd_irq_helper(aconnector); 3713 3714 } 3715 3716 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, 3717 union hpd_irq_data hpd_irq_data) 3718 { 3719 struct hpd_rx_irq_offload_work *offload_work = 3720 kzalloc(sizeof(*offload_work), GFP_KERNEL); 3721 3722 if (!offload_work) { 3723 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); 3724 return; 3725 } 3726 3727 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); 3728 offload_work->data = hpd_irq_data; 3729 offload_work->offload_wq = offload_wq; 3730 3731 queue_work(offload_wq->wq, &offload_work->work); 3732 DRM_DEBUG_KMS("queued work to handle hpd_rx offload\n"); 3733 } 3734 3735 static void handle_hpd_rx_irq(void *param) 3736 { 3737 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3738 struct drm_connector *connector = &aconnector->base; 3739 struct drm_device *dev = connector->dev; 3740 struct dc_link *dc_link = aconnector->dc_link; 3741 bool is_mst_root_connector = aconnector->mst_mgr.mst_state; 3742 bool result = false; 3743 enum dc_connection_type new_connection_type = dc_connection_none; 3744 struct amdgpu_device *adev = drm_to_adev(dev); 3745 union hpd_irq_data hpd_irq_data; 3746 bool link_loss = false; 3747 bool has_left_work = false; 3748 int idx = dc_link->link_index; 3749 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; 3750 struct dc *dc = aconnector->dc_link->ctx->dc; 3751 3752 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); 3753 3754 if (adev->dm.disable_hpd_irq) 3755 return; 3756 3757 /* 3758 * TODO: temporarily hold a mutex so the hpd interrupt cannot run into a gpio 3759 * conflict; once an i2c helper is implemented, this mutex should be 3760 * retired.
3761 */ 3762 mutex_lock(&aconnector->hpd_lock); 3763 3764 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, 3765 &link_loss, true, &has_left_work); 3766 3767 if (!has_left_work) 3768 goto out; 3769 3770 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { 3771 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3772 goto out; 3773 } 3774 3775 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { 3776 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || 3777 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { 3778 bool skip = false; 3779 3780 /* 3781 * DOWN_REP_MSG_RDY is also handled by polling method 3782 * mgr->cbs->poll_hpd_irq() 3783 */ 3784 spin_lock(&offload_wq->offload_lock); 3785 skip = offload_wq->is_handling_mst_msg_rdy_event; 3786 3787 if (!skip) 3788 offload_wq->is_handling_mst_msg_rdy_event = true; 3789 3790 spin_unlock(&offload_wq->offload_lock); 3791 3792 if (!skip) 3793 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3794 3795 goto out; 3796 } 3797 3798 if (link_loss) { 3799 bool skip = false; 3800 3801 spin_lock(&offload_wq->offload_lock); 3802 skip = offload_wq->is_handling_link_loss; 3803 3804 if (!skip) 3805 offload_wq->is_handling_link_loss = true; 3806 3807 spin_unlock(&offload_wq->offload_lock); 3808 3809 if (!skip) 3810 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3811 3812 goto out; 3813 } 3814 } 3815 3816 out: 3817 if (result && !is_mst_root_connector) { 3818 /* Downstream Port status changed. */ 3819 if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) 3820 DRM_ERROR("KMS: Failed to detect connector\n"); 3821 3822 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3823 emulated_link_detect(dc_link); 3824 3825 if (aconnector->fake_enable) 3826 aconnector->fake_enable = false; 3827 3828 amdgpu_dm_update_connector_after_detect(aconnector); 3829 3830 3831 drm_modeset_lock_all(dev); 3832 dm_restore_drm_connector_state(dev, connector); 3833 drm_modeset_unlock_all(dev); 3834 3835 drm_kms_helper_connector_hotplug_event(connector); 3836 } else { 3837 bool ret = false; 3838 3839 mutex_lock(&adev->dm.dc_lock); 3840 dc_exit_ips_for_hw_access(dc); 3841 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX); 3842 mutex_unlock(&adev->dm.dc_lock); 3843 3844 if (ret) { 3845 if (aconnector->fake_enable) 3846 aconnector->fake_enable = false; 3847 3848 amdgpu_dm_update_connector_after_detect(aconnector); 3849 3850 drm_modeset_lock_all(dev); 3851 dm_restore_drm_connector_state(dev, connector); 3852 drm_modeset_unlock_all(dev); 3853 3854 drm_kms_helper_connector_hotplug_event(connector); 3855 } 3856 } 3857 } 3858 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { 3859 if (adev->dm.hdcp_workqueue) 3860 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); 3861 } 3862 3863 if (dc_link->type != dc_connection_mst_branch) 3864 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux); 3865 3866 mutex_unlock(&aconnector->hpd_lock); 3867 } 3868 3869 static int register_hpd_handlers(struct amdgpu_device *adev) 3870 { 3871 struct drm_device *dev = adev_to_drm(adev); 3872 struct drm_connector *connector; 3873 struct amdgpu_dm_connector *aconnector; 3874 const struct dc_link *dc_link; 3875 struct dc_interrupt_params int_params = {0}; 3876 3877 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3878 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3879 3880 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 3881 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, 3882 
dmub_hpd_callback, true)) { 3883 DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 3884 return -EINVAL; 3885 } 3886 3887 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, 3888 dmub_hpd_callback, true)) { 3889 DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 3890 return -EINVAL; 3891 } 3892 3893 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY, 3894 dmub_hpd_sense_callback, true)) { 3895 DRM_ERROR("amdgpu: fail to register dmub hpd sense callback"); 3896 return -EINVAL; 3897 } 3898 } 3899 3900 list_for_each_entry(connector, 3901 &dev->mode_config.connector_list, head) { 3902 3903 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 3904 continue; 3905 3906 aconnector = to_amdgpu_dm_connector(connector); 3907 dc_link = aconnector->dc_link; 3908 3909 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { 3910 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3911 int_params.irq_source = dc_link->irq_source_hpd; 3912 3913 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 3914 int_params.irq_source < DC_IRQ_SOURCE_HPD1 || 3915 int_params.irq_source > DC_IRQ_SOURCE_HPD6) { 3916 DRM_ERROR("Failed to register hpd irq!\n"); 3917 return -EINVAL; 3918 } 3919 3920 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 3921 handle_hpd_irq, (void *) aconnector)) 3922 return -ENOMEM; 3923 } 3924 3925 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { 3926 3927 /* Also register for DP short pulse (hpd_rx). */ 3928 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3929 int_params.irq_source = dc_link->irq_source_hpd_rx; 3930 3931 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 3932 int_params.irq_source < DC_IRQ_SOURCE_HPD1RX || 3933 int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) { 3934 DRM_ERROR("Failed to register hpd rx irq!\n"); 3935 return -EINVAL; 3936 } 3937 3938 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 3939 handle_hpd_rx_irq, (void *) aconnector)) 3940 return -ENOMEM; 3941 } 3942 } 3943 return 0; 3944 } 3945 3946 #if defined(CONFIG_DRM_AMD_DC_SI) 3947 /* Register IRQ sources and initialize IRQ callbacks */ 3948 static int dce60_register_irq_handlers(struct amdgpu_device *adev) 3949 { 3950 struct dc *dc = adev->dm.dc; 3951 struct common_irq_params *c_irq_params; 3952 struct dc_interrupt_params int_params = {0}; 3953 int r; 3954 int i; 3955 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3956 3957 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3958 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3959 3960 /* 3961 * Actions of amdgpu_irq_add_id(): 3962 * 1. Register a set() function with base driver. 3963 * Base driver will call set() function to enable/disable an 3964 * interrupt in DC hardware. 3965 * 2. Register amdgpu_dm_irq_handler(). 3966 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3967 * coming from DC hardware. 3968 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3969 * for acknowledging and handling. 
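 *
 * A sketch of what this two-stage hookup means for the first CRTC in the
 * loop below (srcid i + 1 == 1):
 *
 *   amdgpu_irq_add_id(adev, client_id, 1, &adev->crtc_irq);
 *   int_params.irq_source = dc_interrupt_to_irq_source(dc, 1, 0);
 *   // expected to resolve to DC_IRQ_SOURCE_VBLANK1
 *   amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *                                    dm_crtc_high_irq, c_irq_params);
 *
 * The base driver owns enabling/disabling the line; DM owns the handler.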
3970 */ 3971 3972 /* Use VBLANK interrupt */ 3973 for (i = 0; i < adev->mode_info.num_crtc; i++) { 3974 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq); 3975 if (r) { 3976 DRM_ERROR("Failed to add crtc irq id!\n"); 3977 return r; 3978 } 3979 3980 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3981 int_params.irq_source = 3982 dc_interrupt_to_irq_source(dc, i + 1, 0); 3983 3984 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 3985 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || 3986 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { 3987 DRM_ERROR("Failed to register vblank irq!\n"); 3988 return -EINVAL; 3989 } 3990 3991 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3992 3993 c_irq_params->adev = adev; 3994 c_irq_params->irq_src = int_params.irq_source; 3995 3996 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 3997 dm_crtc_high_irq, c_irq_params)) 3998 return -ENOMEM; 3999 } 4000 4001 /* Use GRPH_PFLIP interrupt */ 4002 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 4003 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 4004 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 4005 if (r) { 4006 DRM_ERROR("Failed to add page flip irq id!\n"); 4007 return r; 4008 } 4009 4010 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4011 int_params.irq_source = 4012 dc_interrupt_to_irq_source(dc, i, 0); 4013 4014 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4015 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || 4016 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { 4017 DRM_ERROR("Failed to register pflip irq!\n"); 4018 return -EINVAL; 4019 } 4020 4021 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 4022 4023 c_irq_params->adev = adev; 4024 c_irq_params->irq_src = int_params.irq_source; 4025 4026 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4027 dm_pflip_high_irq, c_irq_params)) 4028 return -ENOMEM; 4029 } 4030 4031 /* HPD */ 4032 r = amdgpu_irq_add_id(adev, client_id, 4033 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 4034 if (r) { 4035 DRM_ERROR("Failed to add hpd irq id!\n"); 4036 return r; 4037 } 4038 4039 r = register_hpd_handlers(adev); 4040 4041 return r; 4042 } 4043 #endif 4044 4045 /* Register IRQ sources and initialize IRQ callbacks */ 4046 static int dce110_register_irq_handlers(struct amdgpu_device *adev) 4047 { 4048 struct dc *dc = adev->dm.dc; 4049 struct common_irq_params *c_irq_params; 4050 struct dc_interrupt_params int_params = {0}; 4051 int r; 4052 int i; 4053 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 4054 4055 if (adev->family >= AMDGPU_FAMILY_AI) 4056 client_id = SOC15_IH_CLIENTID_DCE; 4057 4058 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4059 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4060 4061 /* 4062 * Actions of amdgpu_irq_add_id(): 4063 * 1. Register a set() function with base driver. 4064 * Base driver will call set() function to enable/disable an 4065 * interrupt in DC hardware. 4066 * 2. Register amdgpu_dm_irq_handler(). 4067 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 4068 * coming from DC hardware. 4069 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 4070 * for acknowledging and handling. 
4071 */ 4072 4073 /* Use VBLANK interrupt */ 4074 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { 4075 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); 4076 if (r) { 4077 DRM_ERROR("Failed to add crtc irq id!\n"); 4078 return r; 4079 } 4080 4081 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4082 int_params.irq_source = 4083 dc_interrupt_to_irq_source(dc, i, 0); 4084 4085 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4086 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || 4087 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { 4088 DRM_ERROR("Failed to register vblank irq!\n"); 4089 return -EINVAL; 4090 } 4091 4092 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 4093 4094 c_irq_params->adev = adev; 4095 c_irq_params->irq_src = int_params.irq_source; 4096 4097 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4098 dm_crtc_high_irq, c_irq_params)) 4099 return -ENOMEM; 4100 } 4101 4102 /* Use VUPDATE interrupt */ 4103 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { 4104 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); 4105 if (r) { 4106 DRM_ERROR("Failed to add vupdate irq id!\n"); 4107 return r; 4108 } 4109 4110 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4111 int_params.irq_source = 4112 dc_interrupt_to_irq_source(dc, i, 0); 4113 4114 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4115 int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || 4116 int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { 4117 DRM_ERROR("Failed to register vupdate irq!\n"); 4118 return -EINVAL; 4119 } 4120 4121 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 4122 4123 c_irq_params->adev = adev; 4124 c_irq_params->irq_src = int_params.irq_source; 4125 4126 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4127 dm_vupdate_high_irq, c_irq_params)) 4128 return -ENOMEM; 4129 } 4130 4131 /* Use GRPH_PFLIP interrupt */ 4132 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 4133 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 4134 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 4135 if (r) { 4136 DRM_ERROR("Failed to add page flip irq id!\n"); 4137 return r; 4138 } 4139 4140 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4141 int_params.irq_source = 4142 dc_interrupt_to_irq_source(dc, i, 0); 4143 4144 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4145 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || 4146 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { 4147 DRM_ERROR("Failed to register pflip irq!\n"); 4148 return -EINVAL; 4149 } 4150 4151 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 4152 4153 c_irq_params->adev = adev; 4154 c_irq_params->irq_src = int_params.irq_source; 4155 4156 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4157 dm_pflip_high_irq, c_irq_params)) 4158 return -ENOMEM; 4159 } 4160 4161 /* HPD */ 4162 r = amdgpu_irq_add_id(adev, client_id, 4163 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 4164 if (r) { 4165 DRM_ERROR("Failed to add hpd irq id!\n"); 4166 return r; 4167 } 4168 4169 r = register_hpd_handlers(adev); 4170 4171 return r; 4172 } 4173 4174 /* Register IRQ sources and initialize IRQ callbacks */ 4175 static int dcn10_register_irq_handlers(struct amdgpu_device *adev) 4176 { 4177 struct dc *dc = adev->dm.dc; 4178 struct common_irq_params *c_irq_params; 4179 
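/*
 * Unlike the legacy DCE paths above, every amdgpu_irq_add_id() call in this
 * function uses SOC15_IH_CLIENTID_DCE, since DCN display interrupts are
 * routed through the SOC15 interrupt handler.
 */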
struct dc_interrupt_params int_params = {0}; 4180 int r; 4181 int i; 4182 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 4183 static const unsigned int vrtl_int_srcid[] = { 4184 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, 4185 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, 4186 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, 4187 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, 4188 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, 4189 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL 4190 }; 4191 #endif 4192 4193 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4194 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4195 4196 /* 4197 * Actions of amdgpu_irq_add_id(): 4198 * 1. Register a set() function with base driver. 4199 * Base driver will call set() function to enable/disable an 4200 * interrupt in DC hardware. 4201 * 2. Register amdgpu_dm_irq_handler(). 4202 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 4203 * coming from DC hardware. 4204 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 4205 * for acknowledging and handling. 4206 */ 4207 4208 /* Use VSTARTUP interrupt */ 4209 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 4210 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 4211 i++) { 4212 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); 4213 4214 if (r) { 4215 DRM_ERROR("Failed to add crtc irq id!\n"); 4216 return r; 4217 } 4218 4219 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4220 int_params.irq_source = 4221 dc_interrupt_to_irq_source(dc, i, 0); 4222 4223 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4224 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || 4225 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { 4226 DRM_ERROR("Failed to register vblank irq!\n"); 4227 return -EINVAL; 4228 } 4229 4230 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 4231 4232 c_irq_params->adev = adev; 4233 c_irq_params->irq_src = int_params.irq_source; 4234 4235 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4236 dm_crtc_high_irq, c_irq_params)) 4237 return -ENOMEM; 4238 } 4239 4240 /* Use otg vertical line interrupt */ 4241 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 4242 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { 4243 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, 4244 vrtl_int_srcid[i], &adev->vline0_irq); 4245 4246 if (r) { 4247 DRM_ERROR("Failed to add vline0 irq id!\n"); 4248 return r; 4249 } 4250 4251 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4252 int_params.irq_source = 4253 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); 4254 4255 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4256 int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 || 4257 int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) { 4258 DRM_ERROR("Failed to register vline0 irq!\n"); 4259 return -EINVAL; 4260 } 4261 4262 c_irq_params = &adev->dm.vline0_params[int_params.irq_source 4263 - DC_IRQ_SOURCE_DC1_VLINE0]; 4264 4265 c_irq_params->adev = adev; 4266 c_irq_params->irq_src = int_params.irq_source; 4267 4268 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4269 dm_dcn_vertical_interrupt0_high_irq, 4270 c_irq_params)) 4271 return -ENOMEM; 4272 } 4273 #endif 4274 4275 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to 4276 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx 4277 * to trigger at end of each vblank, regardless of state of the lock, 4278 * matching DCE behaviour. 
4279 */ 4280 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; 4281 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; 4282 i++) { 4283 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); 4284 4285 if (r) { 4286 DRM_ERROR("Failed to add vupdate irq id!\n"); 4287 return r; 4288 } 4289 4290 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4291 int_params.irq_source = 4292 dc_interrupt_to_irq_source(dc, i, 0); 4293 4294 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4295 int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || 4296 int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { 4297 DRM_ERROR("Failed to register vupdate irq!\n"); 4298 return -EINVAL; 4299 } 4300 4301 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 4302 4303 c_irq_params->adev = adev; 4304 c_irq_params->irq_src = int_params.irq_source; 4305 4306 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4307 dm_vupdate_high_irq, c_irq_params)) 4308 return -ENOMEM; 4309 } 4310 4311 /* Use GRPH_PFLIP interrupt */ 4312 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 4313 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; 4314 i++) { 4315 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 4316 if (r) { 4317 DRM_ERROR("Failed to add page flip irq id!\n"); 4318 return r; 4319 } 4320 4321 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4322 int_params.irq_source = 4323 dc_interrupt_to_irq_source(dc, i, 0); 4324 4325 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4326 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || 4327 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { 4328 DRM_ERROR("Failed to register pflip irq!\n"); 4329 return -EINVAL; 4330 } 4331 4332 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 4333 4334 c_irq_params->adev = adev; 4335 c_irq_params->irq_src = int_params.irq_source; 4336 4337 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4338 dm_pflip_high_irq, c_irq_params)) 4339 return -ENOMEM; 4340 } 4341 4342 /* HPD */ 4343 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 4344 &adev->hpd_irq); 4345 if (r) { 4346 DRM_ERROR("Failed to add hpd irq id!\n"); 4347 return r; 4348 } 4349 4350 r = register_hpd_handlers(adev); 4351 4352 return r; 4353 } 4354 /* Register Outbox IRQ sources and initialize IRQ callbacks */ 4355 static int register_outbox_irq_handlers(struct amdgpu_device *adev) 4356 { 4357 struct dc *dc = adev->dm.dc; 4358 struct common_irq_params *c_irq_params; 4359 struct dc_interrupt_params int_params = {0}; 4360 int r, i; 4361 4362 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4363 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4364 4365 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, 4366 &adev->dmub_outbox_irq); 4367 if (r) { 4368 DRM_ERROR("Failed to add outbox irq id!\n"); 4369 return r; 4370 } 4371 4372 if (dc->ctx->dmub_srv) { 4373 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT; 4374 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 4375 int_params.irq_source = 4376 dc_interrupt_to_irq_source(dc, i, 0); 4377 4378 c_irq_params = &adev->dm.dmub_outbox_params[0]; 4379 4380 c_irq_params->adev = adev; 4381 c_irq_params->irq_src = int_params.irq_source; 4382 4383 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4384 dm_dmub_outbox1_low_irq, c_irq_params)) 
4385 return -ENOMEM; 4386 } 4387 4388 return 0; 4389 } 4390 4391 /* 4392 * Acquires the lock for the atomic state object and returns 4393 * the new atomic state. 4394 * 4395 * This should only be called during atomic check. 4396 */ 4397 int dm_atomic_get_state(struct drm_atomic_state *state, 4398 struct dm_atomic_state **dm_state) 4399 { 4400 struct drm_device *dev = state->dev; 4401 struct amdgpu_device *adev = drm_to_adev(dev); 4402 struct amdgpu_display_manager *dm = &adev->dm; 4403 struct drm_private_state *priv_state; 4404 4405 if (*dm_state) 4406 return 0; 4407 4408 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); 4409 if (IS_ERR(priv_state)) 4410 return PTR_ERR(priv_state); 4411 4412 *dm_state = to_dm_atomic_state(priv_state); 4413 4414 return 0; 4415 } 4416 4417 static struct dm_atomic_state * 4418 dm_atomic_get_new_state(struct drm_atomic_state *state) 4419 { 4420 struct drm_device *dev = state->dev; 4421 struct amdgpu_device *adev = drm_to_adev(dev); 4422 struct amdgpu_display_manager *dm = &adev->dm; 4423 struct drm_private_obj *obj; 4424 struct drm_private_state *new_obj_state; 4425 int i; 4426 4427 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { 4428 if (obj->funcs == dm->atomic_obj.funcs) 4429 return to_dm_atomic_state(new_obj_state); 4430 } 4431 4432 return NULL; 4433 } 4434 4435 static struct drm_private_state * 4436 dm_atomic_duplicate_state(struct drm_private_obj *obj) 4437 { 4438 struct dm_atomic_state *old_state, *new_state; 4439 4440 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); 4441 if (!new_state) 4442 return NULL; 4443 4444 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); 4445 4446 old_state = to_dm_atomic_state(obj->state); 4447 4448 if (old_state && old_state->context) 4449 new_state->context = dc_state_create_copy(old_state->context); 4450 4451 if (!new_state->context) { 4452 kfree(new_state); 4453 return NULL; 4454 } 4455 4456 return &new_state->base; 4457 } 4458 4459 static void dm_atomic_destroy_state(struct drm_private_obj *obj, 4460 struct drm_private_state *state) 4461 { 4462 struct dm_atomic_state *dm_state = to_dm_atomic_state(state); 4463 4464 if (dm_state && dm_state->context) 4465 dc_state_release(dm_state->context); 4466 4467 kfree(dm_state); 4468 } 4469 4470 static struct drm_private_state_funcs dm_atomic_state_funcs = { 4471 .atomic_duplicate_state = dm_atomic_duplicate_state, 4472 .atomic_destroy_state = dm_atomic_destroy_state, 4473 }; 4474 4475 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) 4476 { 4477 struct dm_atomic_state *state; 4478 int r; 4479 4480 adev->mode_info.mode_config_initialized = true; 4481 4482 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; 4483 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; 4484 4485 adev_to_drm(adev)->mode_config.max_width = 16384; 4486 adev_to_drm(adev)->mode_config.max_height = 16384; 4487 4488 adev_to_drm(adev)->mode_config.preferred_depth = 24; 4489 if (adev->asic_type == CHIP_HAWAII) 4490 /* disable prefer shadow for now due to hibernation issues */ 4491 adev_to_drm(adev)->mode_config.prefer_shadow = 0; 4492 else 4493 adev_to_drm(adev)->mode_config.prefer_shadow = 1; 4494 /* indicates support for immediate flip */ 4495 adev_to_drm(adev)->mode_config.async_page_flip = true; 4496 4497 state = kzalloc(sizeof(*state), GFP_KERNEL); 4498 if (!state) 4499 return -ENOMEM; 4500 4501 state->context = dc_state_create_current_copy(adev->dm.dc); 4502 if (!state->context) { 
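/* Without a snapshot of DC's current state there is nothing to seed
 * the atomic private object below with, so fail mode config init. */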
4503 kfree(state); 4504 return -ENOMEM; 4505 } 4506 4507 drm_atomic_private_obj_init(adev_to_drm(adev), 4508 &adev->dm.atomic_obj, 4509 &state->base, 4510 &dm_atomic_state_funcs); 4511 4512 r = amdgpu_display_modeset_create_props(adev); 4513 if (r) { 4514 dc_state_release(state->context); 4515 kfree(state); 4516 return r; 4517 } 4518 4519 #ifdef AMD_PRIVATE_COLOR 4520 if (amdgpu_dm_create_color_properties(adev)) { 4521 dc_state_release(state->context); 4522 kfree(state); 4523 return -ENOMEM; 4524 } 4525 #endif 4526 4527 r = amdgpu_dm_audio_init(adev); 4528 if (r) { 4529 dc_state_release(state->context); 4530 kfree(state); 4531 return r; 4532 } 4533 4534 return 0; 4535 } 4536 4537 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 4538 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 4539 #define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2) 4540 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 4541 4542 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, 4543 int bl_idx) 4544 { 4545 #if defined(CONFIG_ACPI) 4546 struct amdgpu_dm_backlight_caps caps; 4547 4548 memset(&caps, 0, sizeof(caps)); 4549 4550 if (dm->backlight_caps[bl_idx].caps_valid) 4551 return; 4552 4553 amdgpu_acpi_get_backlight_caps(&caps); 4554 4555 /* validate the firmware value is sane */ 4556 if (caps.caps_valid) { 4557 int spread = caps.max_input_signal - caps.min_input_signal; 4558 4559 if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || 4560 caps.min_input_signal < 0 || 4561 spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || 4562 spread < AMDGPU_DM_MIN_SPREAD) { 4563 DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n", 4564 caps.min_input_signal, caps.max_input_signal); 4565 caps.caps_valid = false; 4566 } 4567 } 4568 4569 if (caps.caps_valid) { 4570 dm->backlight_caps[bl_idx].caps_valid = true; 4571 if (caps.aux_support) 4572 return; 4573 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; 4574 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal; 4575 } else { 4576 dm->backlight_caps[bl_idx].min_input_signal = 4577 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4578 dm->backlight_caps[bl_idx].max_input_signal = 4579 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4580 } 4581 #else 4582 if (dm->backlight_caps[bl_idx].aux_support) 4583 return; 4584 4585 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4586 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4587 #endif 4588 } 4589 4590 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, 4591 unsigned int *min, unsigned int *max) 4592 { 4593 if (!caps) 4594 return 0; 4595 4596 if (caps->aux_support) { 4597 // Firmware limits are in nits, DC API wants millinits. 4598 *max = 1000 * caps->aux_max_input_signal; 4599 *min = 1000 * caps->aux_min_input_signal; 4600 } else { 4601 // Firmware limits are 8-bit, PWM control is 16-bit. 
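// A quick sanity check on the scaling below: 0x101 == 257 and
// 255 * 257 == 65535, so the 8-bit firmware range maps exactly onto the
// full 16-bit PWM range (e.g. the default min of 12 becomes 3084).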
4602 *max = 0x101 * caps->max_input_signal; 4603 *min = 0x101 * caps->min_input_signal; 4604 } 4605 return 1; 4606 } 4607 4608 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, 4609 uint32_t brightness) 4610 { 4611 unsigned int min, max; 4612 4613 if (!get_brightness_range(caps, &min, &max)) 4614 return brightness; 4615 4616 // Rescale 0..255 to min..max 4617 return min + DIV_ROUND_CLOSEST((max - min) * brightness, 4618 AMDGPU_MAX_BL_LEVEL); 4619 } 4620 4621 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, 4622 uint32_t brightness) 4623 { 4624 unsigned int min, max; 4625 4626 if (!get_brightness_range(caps, &min, &max)) 4627 return brightness; 4628 4629 if (brightness < min) 4630 return 0; 4631 // Rescale min..max to 0..255 4632 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), 4633 max - min); 4634 } 4635 4636 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, 4637 int bl_idx, 4638 u32 user_brightness) 4639 { 4640 struct amdgpu_dm_backlight_caps caps; 4641 struct dc_link *link; 4642 u32 brightness; 4643 bool rc, reallow_idle = false; 4644 4645 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4646 caps = dm->backlight_caps[bl_idx]; 4647 4648 dm->brightness[bl_idx] = user_brightness; 4649 /* update scratch register */ 4650 if (bl_idx == 0) 4651 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); 4652 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); 4653 link = (struct dc_link *)dm->backlight_link[bl_idx]; 4654 4655 /* Change brightness based on AUX property */ 4656 mutex_lock(&dm->dc_lock); 4657 if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) { 4658 dc_allow_idle_optimizations(dm->dc, false); 4659 reallow_idle = true; 4660 } 4661 4662 if (caps.aux_support) { 4663 rc = dc_link_set_backlight_level_nits(link, true, brightness, 4664 AUX_BL_DEFAULT_TRANSITION_TIME_MS); 4665 if (!rc) 4666 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); 4667 } else { 4668 struct set_backlight_level_params backlight_level_params = { 0 }; 4669 4670 backlight_level_params.backlight_pwm_u16_16 = brightness; 4671 backlight_level_params.transition_time_in_ms = 0; 4672 4673 rc = dc_link_set_backlight_level(link, &backlight_level_params); 4674 if (!rc) 4675 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); 4676 } 4677 4678 if (dm->dc->caps.ips_support && reallow_idle) 4679 dc_allow_idle_optimizations(dm->dc, true); 4680 4681 mutex_unlock(&dm->dc_lock); 4682 4683 if (rc) 4684 dm->actual_brightness[bl_idx] = user_brightness; 4685 } 4686 4687 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) 4688 { 4689 struct amdgpu_display_manager *dm = bl_get_data(bd); 4690 int i; 4691 4692 for (i = 0; i < dm->num_of_edps; i++) { 4693 if (bd == dm->backlight_dev[i]) 4694 break; 4695 } 4696 if (i >= AMDGPU_DM_MAX_NUM_EDP) 4697 i = 0; 4698 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness); 4699 4700 return 0; 4701 } 4702 4703 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, 4704 int bl_idx) 4705 { 4706 int ret; 4707 struct amdgpu_dm_backlight_caps caps; 4708 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; 4709 4710 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4711 caps = dm->backlight_caps[bl_idx]; 4712 4713 if (caps.aux_support) { 4714 u32 avg, peak; 4715 bool rc; 4716 4717 rc = dc_link_get_backlight_level_nits(link, &avg, &peak); 4718 
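/* If the AUX read fails, fall back to the last level set by user space
 * rather than reporting an error to the backlight core. */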
if (!rc) 4719 return dm->brightness[bl_idx]; 4720 return convert_brightness_to_user(&caps, avg); 4721 } 4722 4723 ret = dc_link_get_backlight_level(link); 4724 4725 if (ret == DC_ERROR_UNEXPECTED) 4726 return dm->brightness[bl_idx]; 4727 4728 return convert_brightness_to_user(&caps, ret); 4729 } 4730 4731 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) 4732 { 4733 struct amdgpu_display_manager *dm = bl_get_data(bd); 4734 int i; 4735 4736 for (i = 0; i < dm->num_of_edps; i++) { 4737 if (bd == dm->backlight_dev[i]) 4738 break; 4739 } 4740 if (i >= AMDGPU_DM_MAX_NUM_EDP) 4741 i = 0; 4742 return amdgpu_dm_backlight_get_level(dm, i); 4743 } 4744 4745 static const struct backlight_ops amdgpu_dm_backlight_ops = { 4746 .options = BL_CORE_SUSPENDRESUME, 4747 .get_brightness = amdgpu_dm_backlight_get_brightness, 4748 .update_status = amdgpu_dm_backlight_update_status, 4749 }; 4750 4751 static void 4752 amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) 4753 { 4754 struct drm_device *drm = aconnector->base.dev; 4755 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; 4756 struct backlight_properties props = { 0 }; 4757 struct amdgpu_dm_backlight_caps caps = { 0 }; 4758 char bl_name[16]; 4759 4760 if (aconnector->bl_idx == -1) 4761 return; 4762 4763 if (!acpi_video_backlight_use_native()) { 4764 drm_info(drm, "Skipping amdgpu DM backlight registration\n"); 4765 /* Try registering an ACPI video backlight device instead. */ 4766 acpi_video_register_backlight(); 4767 return; 4768 } 4769 4770 amdgpu_acpi_get_backlight_caps(&caps); 4771 if (caps.caps_valid) { 4772 if (power_supply_is_system_supplied() > 0) 4773 props.brightness = caps.ac_level; 4774 else 4775 props.brightness = caps.dc_level; 4776 } else 4777 props.brightness = AMDGPU_MAX_BL_LEVEL; 4778 4779 props.max_brightness = AMDGPU_MAX_BL_LEVEL; 4780 props.type = BACKLIGHT_RAW; 4781 4782 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", 4783 drm->primary->index + aconnector->bl_idx); 4784 4785 dm->backlight_dev[aconnector->bl_idx] = 4786 backlight_device_register(bl_name, aconnector->base.kdev, dm, 4787 &amdgpu_dm_backlight_ops, &props); 4788 4789 if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { 4790 DRM_ERROR("DM: Backlight registration failed!\n"); 4791 dm->backlight_dev[aconnector->bl_idx] = NULL; 4792 } else 4793 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); 4794 } 4795 4796 static int initialize_plane(struct amdgpu_display_manager *dm, 4797 struct amdgpu_mode_info *mode_info, int plane_id, 4798 enum drm_plane_type plane_type, 4799 const struct dc_plane_cap *plane_cap) 4800 { 4801 struct drm_plane *plane; 4802 unsigned long possible_crtcs; 4803 int ret = 0; 4804 4805 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); 4806 if (!plane) { 4807 DRM_ERROR("KMS: Failed to allocate plane\n"); 4808 return -ENOMEM; 4809 } 4810 plane->type = plane_type; 4811 4812 /* 4813 * HACK: IGT tests expect that the primary plane for a CRTC 4814 * can only have one possible CRTC. Only expose support for 4815 * any CRTC if they're not going to be used as a primary plane 4816 * for a CRTC - like overlay or underlay planes. 
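 *
 * For example, with four CRTCs the primary plane for CRTC 2 ends up with
 * possible_crtcs == (1 << 2) == 0x4, while planes whose id is at or above
 * dc->caps.max_streams (overlay/underlay planes) advertise 0xff.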
4817 */ 4818 possible_crtcs = 1 << plane_id; 4819 if (plane_id >= dm->dc->caps.max_streams) 4820 possible_crtcs = 0xff; 4821 4822 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap); 4823 4824 if (ret) { 4825 DRM_ERROR("KMS: Failed to initialize plane\n"); 4826 kfree(plane); 4827 return ret; 4828 } 4829 4830 if (mode_info) 4831 mode_info->planes[plane_id] = plane; 4832 4833 return ret; 4834 } 4835 4836 4837 static void setup_backlight_device(struct amdgpu_display_manager *dm, 4838 struct amdgpu_dm_connector *aconnector) 4839 { 4840 struct dc_link *link = aconnector->dc_link; 4841 int bl_idx = dm->num_of_edps; 4842 4843 if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) || 4844 link->type == dc_connection_none) 4845 return; 4846 4847 if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) { 4848 drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n"); 4849 return; 4850 } 4851 4852 aconnector->bl_idx = bl_idx; 4853 4854 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4855 dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL; 4856 dm->backlight_link[bl_idx] = link; 4857 dm->num_of_edps++; 4858 4859 update_connector_ext_caps(aconnector); 4860 } 4861 4862 static void amdgpu_set_panel_orientation(struct drm_connector *connector); 4863 4864 /* 4865 * In this architecture, the association 4866 * connector -> encoder -> crtc 4867 * is not really required. The crtc and connector will hold the 4868 * display_index as an abstraction to use with the DAL component 4869 * 4870 * Returns 0 on success 4871 */ 4872 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) 4873 { 4874 struct amdgpu_display_manager *dm = &adev->dm; 4875 s32 i; 4876 struct amdgpu_dm_connector *aconnector = NULL; 4877 struct amdgpu_encoder *aencoder = NULL; 4878 struct amdgpu_mode_info *mode_info = &adev->mode_info; 4879 u32 link_cnt; 4880 s32 primary_planes; 4881 enum dc_connection_type new_connection_type = dc_connection_none; 4882 const struct dc_plane_cap *plane; 4883 bool psr_feature_enabled = false; 4884 bool replay_feature_enabled = false; 4885 int max_overlay = dm->dc->caps.max_slave_planes; 4886 4887 dm->display_indexes_num = dm->dc->caps.max_streams; 4888 /* Update the actual used number of crtc */ 4889 adev->mode_info.num_crtc = adev->dm.display_indexes_num; 4890 4891 amdgpu_dm_set_irq_funcs(adev); 4892 4893 link_cnt = dm->dc->caps.max_links; 4894 if (amdgpu_dm_mode_config_init(dm->adev)) { 4895 DRM_ERROR("DM: Failed to initialize mode config\n"); 4896 return -EINVAL; 4897 } 4898 4899 /* There is one primary plane per CRTC */ 4900 primary_planes = dm->dc->caps.max_streams; 4901 if (primary_planes > AMDGPU_MAX_PLANES) { 4902 DRM_ERROR("DM: Number of primary planes exceeds AMDGPU_MAX_PLANES\n"); 4903 return -EINVAL; 4904 } 4905 4906 /* 4907 * Initialize primary planes, implicit planes for legacy IOCTLs. 4908 * Order is reversed to match iteration order in atomic check. 4909 */ 4910 for (i = (primary_planes - 1); i >= 0; i--) { 4911 plane = &dm->dc->caps.planes[i]; 4912 4913 if (initialize_plane(dm, mode_info, i, 4914 DRM_PLANE_TYPE_PRIMARY, plane)) { 4915 DRM_ERROR("KMS: Failed to initialize primary plane\n"); 4916 goto fail; 4917 } 4918 } 4919 4920 /* 4921 * Initialize overlay planes, index starting after primary planes. 4922 * These planes have a higher DRM index than the primary planes since 4923 * they should be considered as having a higher z-order. 4924 * Order is reversed to match iteration order in atomic check.
4925 * 4926 * Only support DCN for now, and only expose one so we don't encourage 4927 * userspace to use up all the pipes. 4928 */ 4929 for (i = 0; i < dm->dc->caps.max_planes; ++i) { 4930 struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; 4931 4932 /* Do not create overlay if MPO disabled */ 4933 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO) 4934 break; 4935 4936 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) 4937 continue; 4938 4939 if (!plane->pixel_format_support.argb8888) 4940 continue; 4941 4942 if (max_overlay-- == 0) 4943 break; 4944 4945 if (initialize_plane(dm, NULL, primary_planes + i, 4946 DRM_PLANE_TYPE_OVERLAY, plane)) { 4947 DRM_ERROR("KMS: Failed to initialize overlay plane\n"); 4948 goto fail; 4949 } 4950 } 4951 4952 for (i = 0; i < dm->dc->caps.max_streams; i++) 4953 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { 4954 DRM_ERROR("KMS: Failed to initialize crtc\n"); 4955 goto fail; 4956 } 4957 4958 /* Use Outbox interrupt */ 4959 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4960 case IP_VERSION(3, 0, 0): 4961 case IP_VERSION(3, 1, 2): 4962 case IP_VERSION(3, 1, 3): 4963 case IP_VERSION(3, 1, 4): 4964 case IP_VERSION(3, 1, 5): 4965 case IP_VERSION(3, 1, 6): 4966 case IP_VERSION(3, 2, 0): 4967 case IP_VERSION(3, 2, 1): 4968 case IP_VERSION(2, 1, 0): 4969 case IP_VERSION(3, 5, 0): 4970 case IP_VERSION(3, 5, 1): 4971 case IP_VERSION(4, 0, 1): 4972 if (register_outbox_irq_handlers(dm->adev)) { 4973 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4974 goto fail; 4975 } 4976 break; 4977 default: 4978 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", 4979 amdgpu_ip_version(adev, DCE_HWIP, 0)); 4980 } 4981 4982 /* Determine whether to enable PSR support by default. */ 4983 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { 4984 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4985 case IP_VERSION(3, 1, 2): 4986 case IP_VERSION(3, 1, 3): 4987 case IP_VERSION(3, 1, 4): 4988 case IP_VERSION(3, 1, 5): 4989 case IP_VERSION(3, 1, 6): 4990 case IP_VERSION(3, 2, 0): 4991 case IP_VERSION(3, 2, 1): 4992 case IP_VERSION(3, 5, 0): 4993 case IP_VERSION(3, 5, 1): 4994 case IP_VERSION(4, 0, 1): 4995 psr_feature_enabled = true; 4996 break; 4997 default: 4998 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK; 4999 break; 5000 } 5001 } 5002 5003 /* Determine whether to enable Replay support by default. 
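 * Like PSR above, Replay is default-enabled only on DCN generations known
 * to support it and otherwise falls back to the DC_REPLAY_MASK feature flag.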
*/ 5004 if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { 5005 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5006 case IP_VERSION(3, 1, 4): 5007 case IP_VERSION(3, 2, 0): 5008 case IP_VERSION(3, 2, 1): 5009 case IP_VERSION(3, 5, 0): 5010 case IP_VERSION(3, 5, 1): 5011 replay_feature_enabled = true; 5012 break; 5013 5014 default: 5015 replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK; 5016 break; 5017 } 5018 } 5019 5020 if (link_cnt > MAX_LINKS) { 5021 DRM_ERROR( 5022 "KMS: Cannot support more than %d display indexes\n", 5023 MAX_LINKS); 5024 goto fail; 5025 } 5026 5027 /* loops over all connectors on the board */ 5028 for (i = 0; i < link_cnt; i++) { 5029 struct dc_link *link = NULL; 5030 5031 link = dc_get_link_at_index(dm->dc, i); 5032 5033 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) { 5034 struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL); 5035 5036 if (!wbcon) { 5037 DRM_ERROR("KMS: Failed to allocate writeback connector\n"); 5038 continue; 5039 } 5040 5041 if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) { 5042 DRM_ERROR("KMS: Failed to initialize writeback connector\n"); 5043 kfree(wbcon); 5044 continue; 5045 } 5046 5047 link->psr_settings.psr_feature_enabled = false; 5048 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; 5049 5050 continue; 5051 } 5052 5053 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 5054 if (!aconnector) 5055 goto fail; 5056 5057 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); 5058 if (!aencoder) 5059 goto fail; 5060 5061 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { 5062 DRM_ERROR("KMS: Failed to initialize encoder\n"); 5063 goto fail; 5064 } 5065 5066 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { 5067 DRM_ERROR("KMS: Failed to initialize connector\n"); 5068 goto fail; 5069 } 5070 5071 if (dm->hpd_rx_offload_wq) 5072 dm->hpd_rx_offload_wq[aconnector->base.index].aconnector = 5073 aconnector; 5074 5075 if (!dc_link_detect_connection_type(link, &new_connection_type)) 5076 DRM_ERROR("KMS: Failed to detect connector\n"); 5077 5078 if (aconnector->base.force && new_connection_type == dc_connection_none) { 5079 emulated_link_detect(link); 5080 amdgpu_dm_update_connector_after_detect(aconnector); 5081 } else { 5082 bool ret = false; 5083 5084 mutex_lock(&dm->dc_lock); 5085 dc_exit_ips_for_hw_access(dm->dc); 5086 ret = dc_link_detect(link, DETECT_REASON_BOOT); 5087 mutex_unlock(&dm->dc_lock); 5088 5089 if (ret) { 5090 amdgpu_dm_update_connector_after_detect(aconnector); 5091 setup_backlight_device(dm, aconnector); 5092 5093 /* Disable PSR if Replay can be enabled */ 5094 if (replay_feature_enabled) 5095 if (amdgpu_dm_set_replay_caps(link, aconnector)) 5096 psr_feature_enabled = false; 5097 5098 if (psr_feature_enabled) 5099 amdgpu_dm_set_psr_caps(link); 5100 } 5101 } 5102 amdgpu_set_panel_orientation(&aconnector->base); 5103 } 5104 5105 /* Software is initialized. Now we can register interrupt handlers. 
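 * The legacy DCE6/DCE11 paths are selected by adev->asic_type; everything
 * DCN is selected by the DCE_HWIP IP version queried in the default branch.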
*/ 5106 switch (adev->asic_type) { 5107 #if defined(CONFIG_DRM_AMD_DC_SI) 5108 case CHIP_TAHITI: 5109 case CHIP_PITCAIRN: 5110 case CHIP_VERDE: 5111 case CHIP_OLAND: 5112 if (dce60_register_irq_handlers(dm->adev)) { 5113 DRM_ERROR("DM: Failed to initialize IRQ\n"); 5114 goto fail; 5115 } 5116 break; 5117 #endif 5118 case CHIP_BONAIRE: 5119 case CHIP_HAWAII: 5120 case CHIP_KAVERI: 5121 case CHIP_KABINI: 5122 case CHIP_MULLINS: 5123 case CHIP_TONGA: 5124 case CHIP_FIJI: 5125 case CHIP_CARRIZO: 5126 case CHIP_STONEY: 5127 case CHIP_POLARIS11: 5128 case CHIP_POLARIS10: 5129 case CHIP_POLARIS12: 5130 case CHIP_VEGAM: 5131 case CHIP_VEGA10: 5132 case CHIP_VEGA12: 5133 case CHIP_VEGA20: 5134 if (dce110_register_irq_handlers(dm->adev)) { 5135 DRM_ERROR("DM: Failed to initialize IRQ\n"); 5136 goto fail; 5137 } 5138 break; 5139 default: 5140 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5141 case IP_VERSION(1, 0, 0): 5142 case IP_VERSION(1, 0, 1): 5143 case IP_VERSION(2, 0, 2): 5144 case IP_VERSION(2, 0, 3): 5145 case IP_VERSION(2, 0, 0): 5146 case IP_VERSION(2, 1, 0): 5147 case IP_VERSION(3, 0, 0): 5148 case IP_VERSION(3, 0, 2): 5149 case IP_VERSION(3, 0, 3): 5150 case IP_VERSION(3, 0, 1): 5151 case IP_VERSION(3, 1, 2): 5152 case IP_VERSION(3, 1, 3): 5153 case IP_VERSION(3, 1, 4): 5154 case IP_VERSION(3, 1, 5): 5155 case IP_VERSION(3, 1, 6): 5156 case IP_VERSION(3, 2, 0): 5157 case IP_VERSION(3, 2, 1): 5158 case IP_VERSION(3, 5, 0): 5159 case IP_VERSION(3, 5, 1): 5160 case IP_VERSION(4, 0, 1): 5161 if (dcn10_register_irq_handlers(dm->adev)) { 5162 DRM_ERROR("DM: Failed to initialize IRQ\n"); 5163 goto fail; 5164 } 5165 break; 5166 default: 5167 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", 5168 amdgpu_ip_version(adev, DCE_HWIP, 0)); 5169 goto fail; 5170 } 5171 break; 5172 } 5173 5174 return 0; 5175 fail: 5176 kfree(aencoder); 5177 kfree(aconnector); 5178 5179 return -EINVAL; 5180 } 5181 5182 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) 5183 { 5184 drm_atomic_private_obj_fini(&dm->atomic_obj); 5185 } 5186 5187 /****************************************************************************** 5188 * amdgpu_display_funcs functions 5189 *****************************************************************************/ 5190 5191 /* 5192 * dm_bandwidth_update - program display watermarks 5193 * 5194 * @adev: amdgpu_device pointer 5195 * 5196 * Calculate and program the display watermarks and line buffer allocation. 5197 */ 5198 static void dm_bandwidth_update(struct amdgpu_device *adev) 5199 { 5200 /* TODO: implement later */ 5201 } 5202 5203 static const struct amdgpu_display_funcs dm_display_funcs = { 5204 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ 5205 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ 5206 .backlight_set_level = NULL, /* never called for DC */ 5207 .backlight_get_level = NULL, /* never called for DC */ 5208 .hpd_sense = NULL,/* called unconditionally */ 5209 .hpd_set_polarity = NULL, /* called unconditionally */ 5210 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ 5211 .page_flip_get_scanoutpos = 5212 dm_crtc_get_scanoutpos,/* called unconditionally */ 5213 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ 5214 .add_connector = NULL, /* VBIOS parsing. DAL does it. 
*/ 5215 }; 5216 5217 #if defined(CONFIG_DEBUG_KERNEL_DC) 5218 5219 static ssize_t s3_debug_store(struct device *device, 5220 struct device_attribute *attr, 5221 const char *buf, 5222 size_t count) 5223 { 5224 int ret; 5225 int s3_state; 5226 struct drm_device *drm_dev = dev_get_drvdata(device); 5227 struct amdgpu_device *adev = drm_to_adev(drm_dev); 5228 struct amdgpu_ip_block *ip_block; 5229 5230 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE); 5231 if (!ip_block) 5232 return -EINVAL; 5233 5234 ret = kstrtoint(buf, 0, &s3_state); 5235 5236 if (ret == 0) { 5237 if (s3_state) { 5238 dm_resume(ip_block); 5239 drm_kms_helper_hotplug_event(adev_to_drm(adev)); 5240 } else 5241 dm_suspend(ip_block); 5242 } 5243 5244 return ret == 0 ? count : 0; 5245 } 5246 5247 DEVICE_ATTR_WO(s3_debug); 5248 5249 #endif 5250 5251 static int dm_init_microcode(struct amdgpu_device *adev) 5252 { 5253 char *fw_name_dmub; 5254 int r; 5255 5256 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5257 case IP_VERSION(2, 1, 0): 5258 fw_name_dmub = FIRMWARE_RENOIR_DMUB; 5259 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) 5260 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; 5261 break; 5262 case IP_VERSION(3, 0, 0): 5263 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0)) 5264 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; 5265 else 5266 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; 5267 break; 5268 case IP_VERSION(3, 0, 1): 5269 fw_name_dmub = FIRMWARE_VANGOGH_DMUB; 5270 break; 5271 case IP_VERSION(3, 0, 2): 5272 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; 5273 break; 5274 case IP_VERSION(3, 0, 3): 5275 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; 5276 break; 5277 case IP_VERSION(3, 1, 2): 5278 case IP_VERSION(3, 1, 3): 5279 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; 5280 break; 5281 case IP_VERSION(3, 1, 4): 5282 fw_name_dmub = FIRMWARE_DCN_314_DMUB; 5283 break; 5284 case IP_VERSION(3, 1, 5): 5285 fw_name_dmub = FIRMWARE_DCN_315_DMUB; 5286 break; 5287 case IP_VERSION(3, 1, 6): 5288 fw_name_dmub = FIRMWARE_DCN316_DMUB; 5289 break; 5290 case IP_VERSION(3, 2, 0): 5291 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; 5292 break; 5293 case IP_VERSION(3, 2, 1): 5294 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; 5295 break; 5296 case IP_VERSION(3, 5, 0): 5297 fw_name_dmub = FIRMWARE_DCN_35_DMUB; 5298 break; 5299 case IP_VERSION(3, 5, 1): 5300 fw_name_dmub = FIRMWARE_DCN_351_DMUB; 5301 break; 5302 case IP_VERSION(4, 0, 1): 5303 fw_name_dmub = FIRMWARE_DCN_401_DMUB; 5304 break; 5305 default: 5306 /* ASIC doesn't support DMUB. 
*/ 5307 return 0; 5308 } 5309 r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, "%s", fw_name_dmub); 5310 return r; 5311 } 5312 5313 static int dm_early_init(struct amdgpu_ip_block *ip_block) 5314 { 5315 struct amdgpu_device *adev = ip_block->adev; 5316 struct amdgpu_mode_info *mode_info = &adev->mode_info; 5317 struct atom_context *ctx = mode_info->atom_context; 5318 int index = GetIndexIntoMasterTable(DATA, Object_Header); 5319 u16 data_offset; 5320 5321 /* if there is no object header, skip DM */ 5322 if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { 5323 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 5324 dev_info(adev->dev, "No object header, skipping DM\n"); 5325 return -ENOENT; 5326 } 5327 5328 switch (adev->asic_type) { 5329 #if defined(CONFIG_DRM_AMD_DC_SI) 5330 case CHIP_TAHITI: 5331 case CHIP_PITCAIRN: 5332 case CHIP_VERDE: 5333 adev->mode_info.num_crtc = 6; 5334 adev->mode_info.num_hpd = 6; 5335 adev->mode_info.num_dig = 6; 5336 break; 5337 case CHIP_OLAND: 5338 adev->mode_info.num_crtc = 2; 5339 adev->mode_info.num_hpd = 2; 5340 adev->mode_info.num_dig = 2; 5341 break; 5342 #endif 5343 case CHIP_BONAIRE: 5344 case CHIP_HAWAII: 5345 adev->mode_info.num_crtc = 6; 5346 adev->mode_info.num_hpd = 6; 5347 adev->mode_info.num_dig = 6; 5348 break; 5349 case CHIP_KAVERI: 5350 adev->mode_info.num_crtc = 4; 5351 adev->mode_info.num_hpd = 6; 5352 adev->mode_info.num_dig = 7; 5353 break; 5354 case CHIP_KABINI: 5355 case CHIP_MULLINS: 5356 adev->mode_info.num_crtc = 2; 5357 adev->mode_info.num_hpd = 6; 5358 adev->mode_info.num_dig = 6; 5359 break; 5360 case CHIP_FIJI: 5361 case CHIP_TONGA: 5362 adev->mode_info.num_crtc = 6; 5363 adev->mode_info.num_hpd = 6; 5364 adev->mode_info.num_dig = 7; 5365 break; 5366 case CHIP_CARRIZO: 5367 adev->mode_info.num_crtc = 3; 5368 adev->mode_info.num_hpd = 6; 5369 adev->mode_info.num_dig = 9; 5370 break; 5371 case CHIP_STONEY: 5372 adev->mode_info.num_crtc = 2; 5373 adev->mode_info.num_hpd = 6; 5374 adev->mode_info.num_dig = 9; 5375 break; 5376 case CHIP_POLARIS11: 5377 case CHIP_POLARIS12: 5378 adev->mode_info.num_crtc = 5; 5379 adev->mode_info.num_hpd = 5; 5380 adev->mode_info.num_dig = 5; 5381 break; 5382 case CHIP_POLARIS10: 5383 case CHIP_VEGAM: 5384 adev->mode_info.num_crtc = 6; 5385 adev->mode_info.num_hpd = 6; 5386 adev->mode_info.num_dig = 6; 5387 break; 5388 case CHIP_VEGA10: 5389 case CHIP_VEGA12: 5390 case CHIP_VEGA20: 5391 adev->mode_info.num_crtc = 6; 5392 adev->mode_info.num_hpd = 6; 5393 adev->mode_info.num_dig = 6; 5394 break; 5395 default: 5396 5397 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5398 case IP_VERSION(2, 0, 2): 5399 case IP_VERSION(3, 0, 0): 5400 adev->mode_info.num_crtc = 6; 5401 adev->mode_info.num_hpd = 6; 5402 adev->mode_info.num_dig = 6; 5403 break; 5404 case IP_VERSION(2, 0, 0): 5405 case IP_VERSION(3, 0, 2): 5406 adev->mode_info.num_crtc = 5; 5407 adev->mode_info.num_hpd = 5; 5408 adev->mode_info.num_dig = 5; 5409 break; 5410 case IP_VERSION(2, 0, 3): 5411 case IP_VERSION(3, 0, 3): 5412 adev->mode_info.num_crtc = 2; 5413 adev->mode_info.num_hpd = 2; 5414 adev->mode_info.num_dig = 2; 5415 break; 5416 case IP_VERSION(1, 0, 0): 5417 case IP_VERSION(1, 0, 1): 5418 case IP_VERSION(3, 0, 1): 5419 case IP_VERSION(2, 1, 0): 5420 case IP_VERSION(3, 1, 2): 5421 case IP_VERSION(3, 1, 3): 5422 case IP_VERSION(3, 1, 4): 5423 case IP_VERSION(3, 1, 5): 5424 case IP_VERSION(3, 1, 6): 5425 case IP_VERSION(3, 2, 0): 5426 case IP_VERSION(3, 2, 1): 5427 case IP_VERSION(3, 5, 0): 5428 case 
IP_VERSION(3, 5, 1): 5429 case IP_VERSION(4, 0, 1): 5430 adev->mode_info.num_crtc = 4; 5431 adev->mode_info.num_hpd = 4; 5432 adev->mode_info.num_dig = 4; 5433 break; 5434 default: 5435 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", 5436 amdgpu_ip_version(adev, DCE_HWIP, 0)); 5437 return -EINVAL; 5438 } 5439 break; 5440 } 5441 5442 if (adev->mode_info.funcs == NULL) 5443 adev->mode_info.funcs = &dm_display_funcs; 5444 5445 /* 5446 * Note: Do NOT change adev->audio_endpt_rreg and 5447 * adev->audio_endpt_wreg because they are initialised in 5448 * amdgpu_device_init() 5449 */ 5450 #if defined(CONFIG_DEBUG_KERNEL_DC) 5451 device_create_file( 5452 adev_to_drm(adev)->dev, 5453 &dev_attr_s3_debug); 5454 #endif 5455 adev->dc_enabled = true; 5456 5457 return dm_init_microcode(adev); 5458 } 5459 5460 static bool modereset_required(struct drm_crtc_state *crtc_state) 5461 { 5462 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 5463 } 5464 5465 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 5466 { 5467 drm_encoder_cleanup(encoder); 5468 kfree(encoder); 5469 } 5470 5471 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 5472 .destroy = amdgpu_dm_encoder_destroy, 5473 }; 5474 5475 static int 5476 fill_plane_color_attributes(const struct drm_plane_state *plane_state, 5477 const enum surface_pixel_format format, 5478 enum dc_color_space *color_space) 5479 { 5480 bool full_range; 5481 5482 *color_space = COLOR_SPACE_SRGB; 5483 5484 /* DRM color properties only affect non-RGB formats. */ 5485 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 5486 return 0; 5487 5488 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); 5489 5490 switch (plane_state->color_encoding) { 5491 case DRM_COLOR_YCBCR_BT601: 5492 if (full_range) 5493 *color_space = COLOR_SPACE_YCBCR601; 5494 else 5495 *color_space = COLOR_SPACE_YCBCR601_LIMITED; 5496 break; 5497 5498 case DRM_COLOR_YCBCR_BT709: 5499 if (full_range) 5500 *color_space = COLOR_SPACE_YCBCR709; 5501 else 5502 *color_space = COLOR_SPACE_YCBCR709_LIMITED; 5503 break; 5504 5505 case DRM_COLOR_YCBCR_BT2020: 5506 if (full_range) 5507 *color_space = COLOR_SPACE_2020_YCBCR; 5508 else 5509 return -EINVAL; 5510 break; 5511 5512 default: 5513 return -EINVAL; 5514 } 5515 5516 return 0; 5517 } 5518 5519 static int 5520 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 5521 const struct drm_plane_state *plane_state, 5522 const u64 tiling_flags, 5523 struct dc_plane_info *plane_info, 5524 struct dc_plane_address *address, 5525 bool tmz_surface, 5526 bool force_disable_dcc) 5527 { 5528 const struct drm_framebuffer *fb = plane_state->fb; 5529 const struct amdgpu_framebuffer *afb = 5530 to_amdgpu_framebuffer(plane_state->fb); 5531 int ret; 5532 5533 memset(plane_info, 0, sizeof(*plane_info)); 5534 5535 switch (fb->format->format) { 5536 case DRM_FORMAT_C8: 5537 plane_info->format = 5538 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; 5539 break; 5540 case DRM_FORMAT_RGB565: 5541 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; 5542 break; 5543 case DRM_FORMAT_XRGB8888: 5544 case DRM_FORMAT_ARGB8888: 5545 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 5546 break; 5547 case DRM_FORMAT_XRGB2101010: 5548 case DRM_FORMAT_ARGB2101010: 5549 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; 5550 break; 5551 case DRM_FORMAT_XBGR2101010: 5552 case DRM_FORMAT_ABGR2101010: 5553 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; 5554 break; 5555 case DRM_FORMAT_XBGR8888: 5556 case 
DRM_FORMAT_ABGR8888: 5557 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; 5558 break; 5559 case DRM_FORMAT_NV21: 5560 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; 5561 break; 5562 case DRM_FORMAT_NV12: 5563 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; 5564 break; 5565 case DRM_FORMAT_P010: 5566 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; 5567 break; 5568 case DRM_FORMAT_XRGB16161616F: 5569 case DRM_FORMAT_ARGB16161616F: 5570 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; 5571 break; 5572 case DRM_FORMAT_XBGR16161616F: 5573 case DRM_FORMAT_ABGR16161616F: 5574 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; 5575 break; 5576 case DRM_FORMAT_XRGB16161616: 5577 case DRM_FORMAT_ARGB16161616: 5578 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; 5579 break; 5580 case DRM_FORMAT_XBGR16161616: 5581 case DRM_FORMAT_ABGR16161616: 5582 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; 5583 break; 5584 default: 5585 DRM_ERROR( 5586 "Unsupported screen format %p4cc\n", 5587 &fb->format->format); 5588 return -EINVAL; 5589 } 5590 5591 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 5592 case DRM_MODE_ROTATE_0: 5593 plane_info->rotation = ROTATION_ANGLE_0; 5594 break; 5595 case DRM_MODE_ROTATE_90: 5596 plane_info->rotation = ROTATION_ANGLE_90; 5597 break; 5598 case DRM_MODE_ROTATE_180: 5599 plane_info->rotation = ROTATION_ANGLE_180; 5600 break; 5601 case DRM_MODE_ROTATE_270: 5602 plane_info->rotation = ROTATION_ANGLE_270; 5603 break; 5604 default: 5605 plane_info->rotation = ROTATION_ANGLE_0; 5606 break; 5607 } 5608 5609 5610 plane_info->visible = true; 5611 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; 5612 5613 plane_info->layer_index = plane_state->normalized_zpos; 5614 5615 ret = fill_plane_color_attributes(plane_state, plane_info->format, 5616 &plane_info->color_space); 5617 if (ret) 5618 return ret; 5619 5620 ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format, 5621 plane_info->rotation, tiling_flags, 5622 &plane_info->tiling_info, 5623 &plane_info->plane_size, 5624 &plane_info->dcc, address, 5625 tmz_surface, force_disable_dcc); 5626 if (ret) 5627 return ret; 5628 5629 amdgpu_dm_plane_fill_blending_from_plane_state( 5630 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha, 5631 &plane_info->global_alpha, &plane_info->global_alpha_value); 5632 5633 return 0; 5634 } 5635 5636 static int fill_dc_plane_attributes(struct amdgpu_device *adev, 5637 struct dc_plane_state *dc_plane_state, 5638 struct drm_plane_state *plane_state, 5639 struct drm_crtc_state *crtc_state) 5640 { 5641 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 5642 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb; 5643 struct dc_scaling_info scaling_info; 5644 struct dc_plane_info plane_info; 5645 int ret; 5646 bool force_disable_dcc = false; 5647 5648 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info); 5649 if (ret) 5650 return ret; 5651 5652 dc_plane_state->src_rect = scaling_info.src_rect; 5653 dc_plane_state->dst_rect = scaling_info.dst_rect; 5654 dc_plane_state->clip_rect = scaling_info.clip_rect; 5655 dc_plane_state->scaling_quality = scaling_info.scaling_quality; 5656 5657 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; 5658 ret = fill_dc_plane_info_and_addr(adev, plane_state, 5659 afb->tiling_flags, 5660 &plane_info, 5661 &dc_plane_state->address, 5662 
					  afb->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index;
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state,
						plane_state,
						dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static inline void fill_dc_dirty_rect(struct drm_plane *plane,
				      struct rect *dirty_rect, int32_t x,
				      s32 y, s32 width, s32 height,
				      int *i, bool ffu)
{
	WARN_ON(*i >= DC_MAX_DIRTY_RECTS);

	dirty_rect->x = x;
	dirty_rect->y = y;
	dirty_rect->width = width;
	dirty_rect->height = height;

	if (ffu)
		drm_dbg(plane->dev,
			"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
			plane->base.id, width, height);
	else
		drm_dbg(plane->dev,
			"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
			plane->base.id, x, y, width, height);

	(*i)++;
}

/**
 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
 *
 * @plane: DRM plane containing dirty regions that need to be flushed to the
 *         eDP remote fb
 * @old_plane_state: Old state of @plane
 * @new_plane_state: New state of @plane
 * @crtc_state: New state of CRTC connected to the @plane
 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
 * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update
 *             (PSR SU) is enabled. If PSR SU is enabled and damage clips are
 *             available, only the regions of the screen that have changed
 *             will be updated. If PSR SU is not enabled, or if damage clips
 *             are not available, the entire screen will be updated.
 * @dirty_regions_changed: dirty regions changed
 *
 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
 * (referred to as "damage clips" in DRM nomenclature) that require updating
 * on the eDP remote buffer. The responsibility of specifying the dirty
 * regions is amdgpu_dm's.
 *
 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
 * plane with regions that require flushing to the eDP remote buffer. In
 * addition, certain use cases - such as cursor and multi-plane overlay
 * (MPO) - implicitly provide damage clips without any client support via
 * the plane bounds.
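 *
 * When the damage cannot be expressed as a bounded set of dirty rectangles
 * (for example a rotated plane, more damage clips than DC_MAX_DIRTY_RECTS,
 * or damage clips disabled via the amdgpu_damage_clips module parameter),
 * this function falls back to a single full-frame update (FFU) covering the
 * whole CRTC.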
 */
static void fill_dc_dirty_rects(struct drm_plane *plane,
				struct drm_plane_state *old_plane_state,
				struct drm_plane_state *new_plane_state,
				struct drm_crtc_state *crtc_state,
				struct dc_flip_addrs *flip_addrs,
				bool is_psr_su,
				bool *dirty_regions_changed)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct rect *dirty_rects = flip_addrs->dirty_rects;
	u32 num_clips;
	struct drm_mode_rect *clips;
	bool bb_changed;
	bool fb_changed;
	u32 i = 0;

	*dirty_regions_changed = false;

	/*
	 * The cursor plane has its own dirty rect update interface. See
	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return;

	if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
		goto ffu;

	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
	clips = drm_plane_get_damage_clips(new_plane_state);

	if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
						   is_psr_su)))
		goto ffu;

	if (!dm_crtc_state->mpo_requested) {
		if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
			goto ffu;

		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
			fill_dc_dirty_rect(new_plane_state->plane,
					   &dirty_rects[flip_addrs->dirty_rect_count],
					   clips->x1, clips->y1,
					   clips->x2 - clips->x1, clips->y2 - clips->y1,
					   &flip_addrs->dirty_rect_count,
					   false);
		return;
	}

	/*
	 * MPO is requested. Add the entire plane bounding box to the dirty
	 * rects if the plane is flipped to or damaged.
	 *
	 * If the plane is moved or resized, also add the old bounding box to
	 * the dirty rects.
	 */
	fb_changed = old_plane_state->fb->base.id !=
		     new_plane_state->fb->base.id;
	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
		      old_plane_state->crtc_h != new_plane_state->crtc_h);

	drm_dbg(plane->dev,
		"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
		new_plane_state->plane->base.id,
		bb_changed, fb_changed, num_clips);

	*dirty_regions_changed = bb_changed;

	if ((num_clips + (bb_changed ?
2 : 0)) > DC_MAX_DIRTY_RECTS) 5817 goto ffu; 5818 5819 if (bb_changed) { 5820 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5821 new_plane_state->crtc_x, 5822 new_plane_state->crtc_y, 5823 new_plane_state->crtc_w, 5824 new_plane_state->crtc_h, &i, false); 5825 5826 /* Add old plane bounding-box if plane is moved or resized */ 5827 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5828 old_plane_state->crtc_x, 5829 old_plane_state->crtc_y, 5830 old_plane_state->crtc_w, 5831 old_plane_state->crtc_h, &i, false); 5832 } 5833 5834 if (num_clips) { 5835 for (; i < num_clips; clips++) 5836 fill_dc_dirty_rect(new_plane_state->plane, 5837 &dirty_rects[i], clips->x1, 5838 clips->y1, clips->x2 - clips->x1, 5839 clips->y2 - clips->y1, &i, false); 5840 } else if (fb_changed && !bb_changed) { 5841 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5842 new_plane_state->crtc_x, 5843 new_plane_state->crtc_y, 5844 new_plane_state->crtc_w, 5845 new_plane_state->crtc_h, &i, false); 5846 } 5847 5848 flip_addrs->dirty_rect_count = i; 5849 return; 5850 5851 ffu: 5852 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0, 5853 dm_crtc_state->base.mode.crtc_hdisplay, 5854 dm_crtc_state->base.mode.crtc_vdisplay, 5855 &flip_addrs->dirty_rect_count, true); 5856 } 5857 5858 static void update_stream_scaling_settings(const struct drm_display_mode *mode, 5859 const struct dm_connector_state *dm_state, 5860 struct dc_stream_state *stream) 5861 { 5862 enum amdgpu_rmx_type rmx_type; 5863 5864 struct rect src = { 0 }; /* viewport in composition space*/ 5865 struct rect dst = { 0 }; /* stream addressable area */ 5866 5867 /* no mode. nothing to be done */ 5868 if (!mode) 5869 return; 5870 5871 /* Full screen scaling by default */ 5872 src.width = mode->hdisplay; 5873 src.height = mode->vdisplay; 5874 dst.width = stream->timing.h_addressable; 5875 dst.height = stream->timing.v_addressable; 5876 5877 if (dm_state) { 5878 rmx_type = dm_state->scaling; 5879 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { 5880 if (src.width * dst.height < 5881 src.height * dst.width) { 5882 /* height needs less upscaling/more downscaling */ 5883 dst.width = src.width * 5884 dst.height / src.height; 5885 } else { 5886 /* width needs less upscaling/more downscaling */ 5887 dst.height = src.height * 5888 dst.width / src.width; 5889 } 5890 } else if (rmx_type == RMX_CENTER) { 5891 dst = src; 5892 } 5893 5894 dst.x = (stream->timing.h_addressable - dst.width) / 2; 5895 dst.y = (stream->timing.v_addressable - dst.height) / 2; 5896 5897 if (dm_state->underscan_enable) { 5898 dst.x += dm_state->underscan_hborder / 2; 5899 dst.y += dm_state->underscan_vborder / 2; 5900 dst.width -= dm_state->underscan_hborder; 5901 dst.height -= dm_state->underscan_vborder; 5902 } 5903 } 5904 5905 stream->src = src; 5906 stream->dst = dst; 5907 5908 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n", 5909 dst.x, dst.y, dst.width, dst.height); 5910 5911 } 5912 5913 static enum dc_color_depth 5914 convert_color_depth_from_display_info(const struct drm_connector *connector, 5915 bool is_y420, int requested_bpc) 5916 { 5917 u8 bpc; 5918 5919 if (is_y420) { 5920 bpc = 8; 5921 5922 /* Cap display bpc based on HDMI 2.0 HF-VSDB */ 5923 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) 5924 bpc = 16; 5925 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) 5926 bpc = 12; 5927 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) 5928 bpc = 
10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing.
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
		       const struct drm_connector_state *connector_state)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (connector_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT601_YCC:
		if (dc_crtc_timing->flags.Y_ONLY)
			color_space = COLOR_SPACE_YCBCR601_LIMITED;
		else
			color_space = COLOR_SPACE_YCBCR601;
		break;
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		if (dc_crtc_timing->flags.Y_ONLY)
			color_space = COLOR_SPACE_YCBCR709_LIMITED;
		else
			color_space = COLOR_SPACE_YCBCR709;
		break;
	case DRM_MODE_COLORIMETRY_OPRGB:
		color_space = COLOR_SPACE_ADOBERGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
			color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
		else
			color_space = COLOR_SPACE_2020_YCBCR;
		break;
	case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
	default:
		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
			color_space = COLOR_SPACE_SRGB;
		/*
		 * According to the HDMI spec, a pixel clock of 27.03 MHz
		 * (27030 kHz) is the separation point between HDTV and SDTV;
		 * use YCbCr709 above it and YCbCr601 below it.
		 */
		} else if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}
		break;
	}

	return color_space;
}

static enum display_content_type
get_output_content_type(const struct drm_connector_state *connector_state)
{
	switch (connector_state->content_type) {
	default:
	case DRM_MODE_CONTENT_TYPE_NO_DATA:
		return DISPLAY_CONTENT_TYPE_NO_DATA;
	case DRM_MODE_CONTENT_TYPE_GRAPHICS:
		return DISPLAY_CONTENT_TYPE_GRAPHICS;
	case DRM_MODE_CONTENT_TYPE_PHOTO:
		return DISPLAY_CONTENT_TYPE_PHOTO;
	case DRM_MODE_CONTENT_TYPE_CINEMA:
		return DISPLAY_CONTENT_TYPE_CINEMA;
	case DRM_MODE_CONTENT_TYPE_GAME:
		return DISPLAY_CONTENT_TYPE_GAME;
	}
}

static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires an additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/*
		 * Adjust the pixel clock according to the HDMI spec for the
		 * given colour depth.
		 */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}

static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
		aconnector = to_amdgpu_dm_connector(connector);

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY =
old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; 6144 } else { 6145 timing_out->vic = drm_match_cea_mode(mode_in); 6146 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) 6147 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; 6148 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) 6149 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; 6150 } 6151 6152 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 6153 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); 6154 timing_out->vic = avi_frame.video_code; 6155 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); 6156 timing_out->hdmi_vic = hv_frame.vic; 6157 } 6158 6159 if (aconnector && is_freesync_video_mode(mode_in, aconnector)) { 6160 timing_out->h_addressable = mode_in->hdisplay; 6161 timing_out->h_total = mode_in->htotal; 6162 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; 6163 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; 6164 timing_out->v_total = mode_in->vtotal; 6165 timing_out->v_addressable = mode_in->vdisplay; 6166 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; 6167 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; 6168 timing_out->pix_clk_100hz = mode_in->clock * 10; 6169 } else { 6170 timing_out->h_addressable = mode_in->crtc_hdisplay; 6171 timing_out->h_total = mode_in->crtc_htotal; 6172 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; 6173 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; 6174 timing_out->v_total = mode_in->crtc_vtotal; 6175 timing_out->v_addressable = mode_in->crtc_vdisplay; 6176 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 6177 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; 6178 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; 6179 } 6180 6181 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 6182 6183 stream->out_transfer_func.type = TF_TYPE_PREDEFINED; 6184 stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB; 6185 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 6186 if (!adjust_colour_depth_from_display_info(timing_out, info) && 6187 drm_mode_is_420_also(info, mode_in) && 6188 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { 6189 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6190 adjust_colour_depth_from_display_info(timing_out, info); 6191 } 6192 } 6193 6194 stream->output_color_space = get_output_color_space(timing_out, connector_state); 6195 stream->content_type = get_output_content_type(connector_state); 6196 } 6197 6198 static void fill_audio_info(struct audio_info *audio_info, 6199 const struct drm_connector *drm_connector, 6200 const struct dc_sink *dc_sink) 6201 { 6202 int i = 0; 6203 int cea_revision = 0; 6204 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; 6205 6206 audio_info->manufacture_id = edid_caps->manufacturer_id; 6207 audio_info->product_id = edid_caps->product_id; 6208 6209 cea_revision = drm_connector->display_info.cea_rev; 6210 6211 strscpy(audio_info->display_name, 6212 edid_caps->display_name, 6213 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 6214 6215 if (cea_revision >= 3) { 6216 audio_info->mode_count = edid_caps->audio_mode_count; 6217 6218 for (i = 0; i < audio_info->mode_count; ++i) { 6219 audio_info->modes[i].format_code = 6220 (enum audio_format_code) 6221 (edid_caps->audio_modes[i].format_code); 6222 audio_info->modes[i].channel_count = 6223 
edid_caps->audio_modes[i].channel_count; 6224 audio_info->modes[i].sample_rates.all = 6225 edid_caps->audio_modes[i].sample_rate; 6226 audio_info->modes[i].sample_size = 6227 edid_caps->audio_modes[i].sample_size; 6228 } 6229 } 6230 6231 audio_info->flags.all = edid_caps->speaker_flags; 6232 6233 /* TODO: We only check for the progressive mode, check for interlace mode too */ 6234 if (drm_connector->latency_present[0]) { 6235 audio_info->video_latency = drm_connector->video_latency[0]; 6236 audio_info->audio_latency = drm_connector->audio_latency[0]; 6237 } 6238 6239 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ 6240 6241 } 6242 6243 static void 6244 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, 6245 struct drm_display_mode *dst_mode) 6246 { 6247 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; 6248 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; 6249 dst_mode->crtc_clock = src_mode->crtc_clock; 6250 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; 6251 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; 6252 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; 6253 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; 6254 dst_mode->crtc_htotal = src_mode->crtc_htotal; 6255 dst_mode->crtc_hskew = src_mode->crtc_hskew; 6256 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; 6257 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end; 6258 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; 6259 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; 6260 dst_mode->crtc_vtotal = src_mode->crtc_vtotal; 6261 } 6262 6263 static void 6264 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, 6265 const struct drm_display_mode *native_mode, 6266 bool scale_enabled) 6267 { 6268 if (scale_enabled) { 6269 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 6270 } else if (native_mode->clock == drm_mode->clock && 6271 native_mode->htotal == drm_mode->htotal && 6272 native_mode->vtotal == drm_mode->vtotal) { 6273 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 6274 } else { 6275 /* no scaling nor amdgpu inserted, no need to patch */ 6276 } 6277 } 6278 6279 static struct dc_sink * 6280 create_fake_sink(struct dc_link *link) 6281 { 6282 struct dc_sink_init_data sink_init_data = { 0 }; 6283 struct dc_sink *sink = NULL; 6284 6285 sink_init_data.link = link; 6286 sink_init_data.sink_signal = link->connector_signal; 6287 6288 sink = dc_sink_create(&sink_init_data); 6289 if (!sink) { 6290 DRM_ERROR("Failed to create sink!\n"); 6291 return NULL; 6292 } 6293 sink->sink_signal = SIGNAL_TYPE_VIRTUAL; 6294 6295 return sink; 6296 } 6297 6298 static void set_multisync_trigger_params( 6299 struct dc_stream_state *stream) 6300 { 6301 struct dc_stream_state *master = NULL; 6302 6303 if (stream->triggered_crtc_reset.enabled) { 6304 master = stream->triggered_crtc_reset.event_source; 6305 stream->triggered_crtc_reset.event = 6306 master->timing.flags.VSYNC_POSITIVE_POLARITY ? 
				CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag.
		 * For now it's set to false.
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}

/**
 * DOC: FreeSync Video
 *
 * When a userspace application wants to play a video, the content follows a
 * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
 *
 * - TV/NTSC (23.976 FPS)
 * - Cinema (24 FPS)
 * - TV/PAL (25 FPS)
 * - TV/NTSC (29.97 FPS)
 * - TV/NTSC (30 FPS)
 * - Cinema HFR (48 FPS)
 * - TV/PAL (50 FPS)
 * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96 FPS)
 *
 * The list of standard video formats is not huge and can be added to the
 * connector's modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch will happen seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If the userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid blinking during the
 * transition. For example, the video player can change the modesetting from
 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
 * causing any display blink. This same concept can be applied to a mode
 * setting change.
 */
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			      bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
6401 &aconnector->base.probed_modes : 6402 &aconnector->base.modes; 6403 6404 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 6405 return NULL; 6406 6407 if (aconnector->freesync_vid_base.clock != 0) 6408 return &aconnector->freesync_vid_base; 6409 6410 /* Find the preferred mode */ 6411 list_for_each_entry(m, list_head, head) { 6412 if (m->type & DRM_MODE_TYPE_PREFERRED) { 6413 m_pref = m; 6414 break; 6415 } 6416 } 6417 6418 if (!m_pref) { 6419 /* Probably an EDID with no preferred mode. Fallback to first entry */ 6420 m_pref = list_first_entry_or_null( 6421 &aconnector->base.modes, struct drm_display_mode, head); 6422 if (!m_pref) { 6423 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); 6424 return NULL; 6425 } 6426 } 6427 6428 highest_refresh = drm_mode_vrefresh(m_pref); 6429 6430 /* 6431 * Find the mode with highest refresh rate with same resolution. 6432 * For some monitors, preferred mode is not the mode with highest 6433 * supported refresh rate. 6434 */ 6435 list_for_each_entry(m, list_head, head) { 6436 current_refresh = drm_mode_vrefresh(m); 6437 6438 if (m->hdisplay == m_pref->hdisplay && 6439 m->vdisplay == m_pref->vdisplay && 6440 highest_refresh < current_refresh) { 6441 highest_refresh = current_refresh; 6442 m_pref = m; 6443 } 6444 } 6445 6446 drm_mode_copy(&aconnector->freesync_vid_base, m_pref); 6447 return m_pref; 6448 } 6449 6450 static bool is_freesync_video_mode(const struct drm_display_mode *mode, 6451 struct amdgpu_dm_connector *aconnector) 6452 { 6453 struct drm_display_mode *high_mode; 6454 int timing_diff; 6455 6456 high_mode = get_highest_refresh_rate_mode(aconnector, false); 6457 if (!high_mode || !mode) 6458 return false; 6459 6460 timing_diff = high_mode->vtotal - mode->vtotal; 6461 6462 if (high_mode->clock == 0 || high_mode->clock != mode->clock || 6463 high_mode->hdisplay != mode->hdisplay || 6464 high_mode->vdisplay != mode->vdisplay || 6465 high_mode->hsync_start != mode->hsync_start || 6466 high_mode->hsync_end != mode->hsync_end || 6467 high_mode->htotal != mode->htotal || 6468 high_mode->hskew != mode->hskew || 6469 high_mode->vscan != mode->vscan || 6470 high_mode->vsync_start - mode->vsync_start != timing_diff || 6471 high_mode->vsync_end - mode->vsync_end != timing_diff) 6472 return false; 6473 else 6474 return true; 6475 } 6476 6477 #if defined(CONFIG_DRM_AMD_DC_FP) 6478 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, 6479 struct dc_sink *sink, struct dc_stream_state *stream, 6480 struct dsc_dec_dpcd_caps *dsc_caps) 6481 { 6482 stream->timing.flags.DSC = 0; 6483 dsc_caps->is_dsc_supported = false; 6484 6485 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || 6486 sink->sink_signal == SIGNAL_TYPE_EDP)) { 6487 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || 6488 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) 6489 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, 6490 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 6491 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, 6492 dsc_caps); 6493 } 6494 } 6495 6496 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, 6497 struct dc_sink *sink, struct dc_stream_state *stream, 6498 struct dsc_dec_dpcd_caps *dsc_caps, 6499 uint32_t max_dsc_target_bpp_limit_override) 6500 { 6501 const struct dc_link_settings *verified_link_cap = NULL; 6502 u32 link_bw_in_kbps; 6503 u32 edp_min_bpp_x16, edp_max_bpp_x16; 6504 struct dc *dc = sink->ctx->dc; 6505 
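	/*
	 * Flow of the code below: first compute the DSC bandwidth range the
	 * panel supports; if even the least-compressed (maximum bandwidth)
	 * configuration fits within the link, run at the panel's maximum
	 * bits_per_pixel, otherwise fall back to a config computed against
	 * the available link bandwidth.
	 */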
struct dc_dsc_bw_range bw_range = {0}; 6506 struct dc_dsc_config dsc_cfg = {0}; 6507 struct dc_dsc_config_options dsc_options = {0}; 6508 6509 dc_dsc_get_default_config_option(dc, &dsc_options); 6510 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 6511 6512 verified_link_cap = dc_link_get_link_cap(stream->link); 6513 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); 6514 edp_min_bpp_x16 = 8 * 16; 6515 edp_max_bpp_x16 = 8 * 16; 6516 6517 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) 6518 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; 6519 6520 if (edp_max_bpp_x16 < edp_min_bpp_x16) 6521 edp_min_bpp_x16 = edp_max_bpp_x16; 6522 6523 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], 6524 dc->debug.dsc_min_slice_height_override, 6525 edp_min_bpp_x16, edp_max_bpp_x16, 6526 dsc_caps, 6527 &stream->timing, 6528 dc_link_get_highest_encoding_format(aconnector->dc_link), 6529 &bw_range)) { 6530 6531 if (bw_range.max_kbps < link_bw_in_kbps) { 6532 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 6533 dsc_caps, 6534 &dsc_options, 6535 0, 6536 &stream->timing, 6537 dc_link_get_highest_encoding_format(aconnector->dc_link), 6538 &dsc_cfg)) { 6539 stream->timing.dsc_cfg = dsc_cfg; 6540 stream->timing.flags.DSC = 1; 6541 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; 6542 } 6543 return; 6544 } 6545 } 6546 6547 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 6548 dsc_caps, 6549 &dsc_options, 6550 link_bw_in_kbps, 6551 &stream->timing, 6552 dc_link_get_highest_encoding_format(aconnector->dc_link), 6553 &dsc_cfg)) { 6554 stream->timing.dsc_cfg = dsc_cfg; 6555 stream->timing.flags.DSC = 1; 6556 } 6557 } 6558 6559 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, 6560 struct dc_sink *sink, struct dc_stream_state *stream, 6561 struct dsc_dec_dpcd_caps *dsc_caps) 6562 { 6563 struct drm_connector *drm_connector = &aconnector->base; 6564 u32 link_bandwidth_kbps; 6565 struct dc *dc = sink->ctx->dc; 6566 u32 max_supported_bw_in_kbps, timing_bw_in_kbps; 6567 u32 dsc_max_supported_bw_in_kbps; 6568 u32 max_dsc_target_bpp_limit_override = 6569 drm_connector->display_info.max_dsc_bpp; 6570 struct dc_dsc_config_options dsc_options = {0}; 6571 6572 dc_dsc_get_default_config_option(dc, &dsc_options); 6573 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 6574 6575 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, 6576 dc_link_get_link_cap(aconnector->dc_link)); 6577 6578 /* Set DSC policy according to dsc_clock_en */ 6579 dc_dsc_policy_set_enable_dsc_when_not_needed( 6580 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); 6581 6582 if (sink->sink_signal == SIGNAL_TYPE_EDP && 6583 !aconnector->dc_link->panel_config.dsc.disable_dsc_edp && 6584 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { 6585 6586 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); 6587 6588 } else if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { 6589 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { 6590 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 6591 dsc_caps, 6592 &dsc_options, 6593 link_bandwidth_kbps, 6594 &stream->timing, 6595 dc_link_get_highest_encoding_format(aconnector->dc_link), 6596 &stream->timing.dsc_cfg)) { 6597 stream->timing.flags.DSC = 1; 6598 DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST 
RX\n", 6599 __func__, drm_connector->name); 6600 } 6601 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { 6602 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, 6603 dc_link_get_highest_encoding_format(aconnector->dc_link)); 6604 max_supported_bw_in_kbps = link_bandwidth_kbps; 6605 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; 6606 6607 if (timing_bw_in_kbps > max_supported_bw_in_kbps && 6608 max_supported_bw_in_kbps > 0 && 6609 dsc_max_supported_bw_in_kbps > 0) 6610 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 6611 dsc_caps, 6612 &dsc_options, 6613 dsc_max_supported_bw_in_kbps, 6614 &stream->timing, 6615 dc_link_get_highest_encoding_format(aconnector->dc_link), 6616 &stream->timing.dsc_cfg)) { 6617 stream->timing.flags.DSC = 1; 6618 DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n", 6619 __func__, drm_connector->name); 6620 } 6621 } 6622 } 6623 6624 /* Overwrite the stream flag if DSC is enabled through debugfs */ 6625 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) 6626 stream->timing.flags.DSC = 1; 6627 6628 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h) 6629 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; 6630 6631 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) 6632 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; 6633 6634 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) 6635 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; 6636 } 6637 #endif 6638 6639 static struct dc_stream_state * 6640 create_stream_for_sink(struct drm_connector *connector, 6641 const struct drm_display_mode *drm_mode, 6642 const struct dm_connector_state *dm_state, 6643 const struct dc_stream_state *old_stream, 6644 int requested_bpc) 6645 { 6646 struct amdgpu_dm_connector *aconnector = NULL; 6647 struct drm_display_mode *preferred_mode = NULL; 6648 const struct drm_connector_state *con_state = &dm_state->base; 6649 struct dc_stream_state *stream = NULL; 6650 struct drm_display_mode mode; 6651 struct drm_display_mode saved_mode; 6652 struct drm_display_mode *freesync_mode = NULL; 6653 bool native_mode_found = false; 6654 bool recalculate_timing = false; 6655 bool scale = dm_state->scaling != RMX_OFF; 6656 int mode_refresh; 6657 int preferred_refresh = 0; 6658 enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; 6659 #if defined(CONFIG_DRM_AMD_DC_FP) 6660 struct dsc_dec_dpcd_caps dsc_caps; 6661 #endif 6662 struct dc_link *link = NULL; 6663 struct dc_sink *sink = NULL; 6664 6665 drm_mode_init(&mode, drm_mode); 6666 memset(&saved_mode, 0, sizeof(saved_mode)); 6667 6668 if (connector == NULL) { 6669 DRM_ERROR("connector is NULL!\n"); 6670 return stream; 6671 } 6672 6673 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) { 6674 aconnector = NULL; 6675 aconnector = to_amdgpu_dm_connector(connector); 6676 link = aconnector->dc_link; 6677 } else { 6678 struct drm_writeback_connector *wbcon = NULL; 6679 struct amdgpu_dm_wb_connector *dm_wbcon = NULL; 6680 6681 wbcon = drm_connector_to_writeback(connector); 6682 dm_wbcon = to_amdgpu_dm_wb_connector(wbcon); 6683 link = dm_wbcon->link; 6684 } 6685 6686 if (!aconnector || !aconnector->dc_sink) { 6687 sink = create_fake_sink(link); 6688 if (!sink) 6689 return stream; 6690 6691 } else { 6692 sink = aconnector->dc_sink; 6693 dc_sink_retain(sink); 6694 } 
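	/*
	 * From here on "sink" holds a reference (either a fake sink or a
	 * retained dc_sink); it is released at the "finish" label below.
	 */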
6695 6696 stream = dc_create_stream_for_sink(sink); 6697 6698 if (stream == NULL) { 6699 DRM_ERROR("Failed to create stream for sink!\n"); 6700 goto finish; 6701 } 6702 6703 /* We leave this NULL for writeback connectors */ 6704 stream->dm_stream_context = aconnector; 6705 6706 stream->timing.flags.LTE_340MCSC_SCRAMBLE = 6707 connector->display_info.hdmi.scdc.scrambling.low_rates; 6708 6709 list_for_each_entry(preferred_mode, &connector->modes, head) { 6710 /* Search for preferred mode */ 6711 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { 6712 native_mode_found = true; 6713 break; 6714 } 6715 } 6716 if (!native_mode_found) 6717 preferred_mode = list_first_entry_or_null( 6718 &connector->modes, 6719 struct drm_display_mode, 6720 head); 6721 6722 mode_refresh = drm_mode_vrefresh(&mode); 6723 6724 if (preferred_mode == NULL) { 6725 /* 6726 * This may not be an error, the use case is when we have no 6727 * usermode calls to reset and set mode upon hotplug. In this 6728 * case, we call set mode ourselves to restore the previous mode 6729 * and the modelist may not be filled in time. 6730 */ 6731 DRM_DEBUG_DRIVER("No preferred mode found\n"); 6732 } else if (aconnector) { 6733 recalculate_timing = amdgpu_freesync_vid_mode && 6734 is_freesync_video_mode(&mode, aconnector); 6735 if (recalculate_timing) { 6736 freesync_mode = get_highest_refresh_rate_mode(aconnector, false); 6737 drm_mode_copy(&saved_mode, &mode); 6738 saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio; 6739 drm_mode_copy(&mode, freesync_mode); 6740 mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio; 6741 } else { 6742 decide_crtc_timing_for_drm_display_mode( 6743 &mode, preferred_mode, scale); 6744 6745 preferred_refresh = drm_mode_vrefresh(preferred_mode); 6746 } 6747 } 6748 6749 if (recalculate_timing) 6750 drm_mode_set_crtcinfo(&saved_mode, 0); 6751 6752 /* 6753 * If scaling is enabled and refresh rate didn't change 6754 * we copy the vic and polarities of the old timings 6755 */ 6756 if (!scale || mode_refresh != preferred_refresh) 6757 fill_stream_properties_from_drm_display_mode( 6758 stream, &mode, connector, con_state, NULL, 6759 requested_bpc); 6760 else 6761 fill_stream_properties_from_drm_display_mode( 6762 stream, &mode, connector, con_state, old_stream, 6763 requested_bpc); 6764 6765 /* The rest isn't needed for writeback connectors */ 6766 if (!aconnector) 6767 goto finish; 6768 6769 if (aconnector->timing_changed) { 6770 drm_dbg(aconnector->base.dev, 6771 "overriding timing for automated test, bpc %d, changing to %d\n", 6772 stream->timing.display_color_depth, 6773 aconnector->timing_requested->display_color_depth); 6774 stream->timing = *aconnector->timing_requested; 6775 } 6776 6777 #if defined(CONFIG_DRM_AMD_DC_FP) 6778 /* SST DSC determination policy */ 6779 update_dsc_caps(aconnector, sink, stream, &dsc_caps); 6780 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) 6781 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps); 6782 #endif 6783 6784 update_stream_scaling_settings(&mode, dm_state, stream); 6785 6786 fill_audio_info( 6787 &stream->audio_info, 6788 connector, 6789 sink); 6790 6791 update_stream_signal(stream, sink); 6792 6793 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6794 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); 6795 6796 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || 6797 stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || 6798 stream->signal == SIGNAL_TYPE_EDP) { 6799 const struct 
dc_edid_caps *edid_caps;
		unsigned int disable_colorimetry = 0;

		if (aconnector->dc_sink) {
			edid_caps = &aconnector->dc_sink->edid_caps;
			disable_colorimetry = edid_caps->panel_patch.disable_colorimetry;
		}

		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
		stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
			stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED &&
			!disable_colorimetry;

		if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
			tf = TRANSFER_FUNC_GAMMA_22;
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
		aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;

	}
finish:
	dc_sink_release(sink);

	return stream;
}

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl, which
	 *    makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON ||
			     aconnector->base.force == DRM_FORCE_ON_DIGITAL);

	update_subconnector_property(aconnector);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	}

	return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device
*adev = drm_to_adev(dev); 6912 struct dm_connector_state *dm_state = 6913 to_dm_connector_state(state); 6914 int ret = -EINVAL; 6915 6916 if (property == dev->mode_config.scaling_mode_property) { 6917 switch (dm_state->scaling) { 6918 case RMX_CENTER: 6919 *val = DRM_MODE_SCALE_CENTER; 6920 break; 6921 case RMX_ASPECT: 6922 *val = DRM_MODE_SCALE_ASPECT; 6923 break; 6924 case RMX_FULL: 6925 *val = DRM_MODE_SCALE_FULLSCREEN; 6926 break; 6927 case RMX_OFF: 6928 default: 6929 *val = DRM_MODE_SCALE_NONE; 6930 break; 6931 } 6932 ret = 0; 6933 } else if (property == adev->mode_info.underscan_hborder_property) { 6934 *val = dm_state->underscan_hborder; 6935 ret = 0; 6936 } else if (property == adev->mode_info.underscan_vborder_property) { 6937 *val = dm_state->underscan_vborder; 6938 ret = 0; 6939 } else if (property == adev->mode_info.underscan_property) { 6940 *val = dm_state->underscan_enable; 6941 ret = 0; 6942 } 6943 6944 return ret; 6945 } 6946 6947 /** 6948 * DOC: panel power savings 6949 * 6950 * The display manager allows you to set your desired **panel power savings** 6951 * level (between 0-4, with 0 representing off), e.g. using the following:: 6952 * 6953 * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings 6954 * 6955 * Modifying this value can have implications on color accuracy, so tread 6956 * carefully. 6957 */ 6958 6959 static ssize_t panel_power_savings_show(struct device *device, 6960 struct device_attribute *attr, 6961 char *buf) 6962 { 6963 struct drm_connector *connector = dev_get_drvdata(device); 6964 struct drm_device *dev = connector->dev; 6965 u8 val; 6966 6967 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 6968 val = to_dm_connector_state(connector->state)->abm_level == 6969 ABM_LEVEL_IMMEDIATE_DISABLE ? 
0 : 6970 to_dm_connector_state(connector->state)->abm_level; 6971 drm_modeset_unlock(&dev->mode_config.connection_mutex); 6972 6973 return sysfs_emit(buf, "%u\n", val); 6974 } 6975 6976 static ssize_t panel_power_savings_store(struct device *device, 6977 struct device_attribute *attr, 6978 const char *buf, size_t count) 6979 { 6980 struct drm_connector *connector = dev_get_drvdata(device); 6981 struct drm_device *dev = connector->dev; 6982 long val; 6983 int ret; 6984 6985 ret = kstrtol(buf, 0, &val); 6986 6987 if (ret) 6988 return ret; 6989 6990 if (val < 0 || val > 4) 6991 return -EINVAL; 6992 6993 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 6994 to_dm_connector_state(connector->state)->abm_level = val ?: 6995 ABM_LEVEL_IMMEDIATE_DISABLE; 6996 drm_modeset_unlock(&dev->mode_config.connection_mutex); 6997 6998 drm_kms_helper_hotplug_event(dev); 6999 7000 return count; 7001 } 7002 7003 static DEVICE_ATTR_RW(panel_power_savings); 7004 7005 static struct attribute *amdgpu_attrs[] = { 7006 &dev_attr_panel_power_savings.attr, 7007 NULL 7008 }; 7009 7010 static const struct attribute_group amdgpu_group = { 7011 .name = "amdgpu", 7012 .attrs = amdgpu_attrs 7013 }; 7014 7015 static bool 7016 amdgpu_dm_should_create_sysfs(struct amdgpu_dm_connector *amdgpu_dm_connector) 7017 { 7018 if (amdgpu_dm_abm_level >= 0) 7019 return false; 7020 7021 if (amdgpu_dm_connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) 7022 return false; 7023 7024 /* check for OLED panels */ 7025 if (amdgpu_dm_connector->bl_idx >= 0) { 7026 struct drm_device *drm = amdgpu_dm_connector->base.dev; 7027 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; 7028 struct amdgpu_dm_backlight_caps *caps; 7029 7030 caps = &dm->backlight_caps[amdgpu_dm_connector->bl_idx]; 7031 if (caps->aux_support) 7032 return false; 7033 } 7034 7035 return true; 7036 } 7037 7038 static void amdgpu_dm_connector_unregister(struct drm_connector *connector) 7039 { 7040 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 7041 7042 if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) 7043 sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group); 7044 7045 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); 7046 } 7047 7048 static void amdgpu_dm_connector_destroy(struct drm_connector *connector) 7049 { 7050 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7051 struct amdgpu_device *adev = drm_to_adev(connector->dev); 7052 struct amdgpu_display_manager *dm = &adev->dm; 7053 7054 /* 7055 * Call only if mst_mgr was initialized before since it's not done 7056 * for all connector types. 
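	 * (typically only DisplayPort connectors have an MST manager set up).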
7057 */ 7058 if (aconnector->mst_mgr.dev) 7059 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); 7060 7061 if (aconnector->bl_idx != -1) { 7062 backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]); 7063 dm->backlight_dev[aconnector->bl_idx] = NULL; 7064 } 7065 7066 if (aconnector->dc_em_sink) 7067 dc_sink_release(aconnector->dc_em_sink); 7068 aconnector->dc_em_sink = NULL; 7069 if (aconnector->dc_sink) 7070 dc_sink_release(aconnector->dc_sink); 7071 aconnector->dc_sink = NULL; 7072 7073 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 7074 drm_connector_unregister(connector); 7075 drm_connector_cleanup(connector); 7076 if (aconnector->i2c) { 7077 i2c_del_adapter(&aconnector->i2c->base); 7078 kfree(aconnector->i2c); 7079 } 7080 kfree(aconnector->dm_dp_aux.aux.name); 7081 7082 kfree(connector); 7083 } 7084 7085 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) 7086 { 7087 struct dm_connector_state *state = 7088 to_dm_connector_state(connector->state); 7089 7090 if (connector->state) 7091 __drm_atomic_helper_connector_destroy_state(connector->state); 7092 7093 kfree(state); 7094 7095 state = kzalloc(sizeof(*state), GFP_KERNEL); 7096 7097 if (state) { 7098 state->scaling = RMX_OFF; 7099 state->underscan_enable = false; 7100 state->underscan_hborder = 0; 7101 state->underscan_vborder = 0; 7102 state->base.max_requested_bpc = 8; 7103 state->vcpi_slots = 0; 7104 state->pbn = 0; 7105 7106 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 7107 if (amdgpu_dm_abm_level <= 0) 7108 state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE; 7109 else 7110 state->abm_level = amdgpu_dm_abm_level; 7111 } 7112 7113 __drm_atomic_helper_connector_reset(connector, &state->base); 7114 } 7115 } 7116 7117 struct drm_connector_state * 7118 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) 7119 { 7120 struct dm_connector_state *state = 7121 to_dm_connector_state(connector->state); 7122 7123 struct dm_connector_state *new_state = 7124 kmemdup(state, sizeof(*state), GFP_KERNEL); 7125 7126 if (!new_state) 7127 return NULL; 7128 7129 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); 7130 7131 new_state->freesync_capable = state->freesync_capable; 7132 new_state->abm_level = state->abm_level; 7133 new_state->scaling = state->scaling; 7134 new_state->underscan_enable = state->underscan_enable; 7135 new_state->underscan_hborder = state->underscan_hborder; 7136 new_state->underscan_vborder = state->underscan_vborder; 7137 new_state->vcpi_slots = state->vcpi_slots; 7138 new_state->pbn = state->pbn; 7139 return &new_state->base; 7140 } 7141 7142 static int 7143 amdgpu_dm_connector_late_register(struct drm_connector *connector) 7144 { 7145 struct amdgpu_dm_connector *amdgpu_dm_connector = 7146 to_amdgpu_dm_connector(connector); 7147 int r; 7148 7149 if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) { 7150 r = sysfs_create_group(&connector->kdev->kobj, 7151 &amdgpu_group); 7152 if (r) 7153 return r; 7154 } 7155 7156 amdgpu_dm_register_backlight_device(amdgpu_dm_connector); 7157 7158 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 7159 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 7160 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; 7161 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); 7162 if (r) 7163 return r; 7164 } 7165 7166 #if defined(CONFIG_DEBUG_FS) 7167 connector_debugfs_init(amdgpu_dm_connector); 7168 #endif 7169 7170 return 0; 7171 } 7172 7173 static void 
amdgpu_dm_connector_funcs_force(struct drm_connector *connector) 7174 { 7175 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7176 struct dc_link *dc_link = aconnector->dc_link; 7177 struct dc_sink *dc_em_sink = aconnector->dc_em_sink; 7178 const struct drm_edid *drm_edid; 7179 7180 drm_edid = drm_edid_read(connector); 7181 drm_edid_connector_update(connector, drm_edid); 7182 if (!drm_edid) { 7183 DRM_ERROR("No EDID found on connector: %s.\n", connector->name); 7184 return; 7185 } 7186 7187 aconnector->drm_edid = drm_edid; 7188 /* Update emulated (virtual) sink's EDID */ 7189 if (dc_em_sink && dc_link) { 7190 // FIXME: Get rid of drm_edid_raw() 7191 const struct edid *edid = drm_edid_raw(drm_edid); 7192 7193 memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); 7194 memmove(dc_em_sink->dc_edid.raw_edid, edid, 7195 (edid->extensions + 1) * EDID_LENGTH); 7196 dm_helpers_parse_edid_caps( 7197 dc_link, 7198 &dc_em_sink->dc_edid, 7199 &dc_em_sink->edid_caps); 7200 } 7201 } 7202 7203 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { 7204 .reset = amdgpu_dm_connector_funcs_reset, 7205 .detect = amdgpu_dm_connector_detect, 7206 .fill_modes = drm_helper_probe_single_connector_modes, 7207 .destroy = amdgpu_dm_connector_destroy, 7208 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 7209 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 7210 .atomic_set_property = amdgpu_dm_connector_atomic_set_property, 7211 .atomic_get_property = amdgpu_dm_connector_atomic_get_property, 7212 .late_register = amdgpu_dm_connector_late_register, 7213 .early_unregister = amdgpu_dm_connector_unregister, 7214 .force = amdgpu_dm_connector_funcs_force 7215 }; 7216 7217 static int get_modes(struct drm_connector *connector) 7218 { 7219 return amdgpu_dm_connector_get_modes(connector); 7220 } 7221 7222 static void create_eml_sink(struct amdgpu_dm_connector *aconnector) 7223 { 7224 struct drm_connector *connector = &aconnector->base; 7225 struct dc_sink_init_data init_params = { 7226 .link = aconnector->dc_link, 7227 .sink_signal = SIGNAL_TYPE_VIRTUAL 7228 }; 7229 const struct drm_edid *drm_edid; 7230 const struct edid *edid; 7231 7232 drm_edid = drm_edid_read(connector); 7233 drm_edid_connector_update(connector, drm_edid); 7234 if (!drm_edid) { 7235 DRM_ERROR("No EDID found on connector: %s.\n", connector->name); 7236 return; 7237 } 7238 7239 if (connector->display_info.is_hdmi) 7240 init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; 7241 7242 aconnector->drm_edid = drm_edid; 7243 7244 edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() 7245 aconnector->dc_em_sink = dc_link_add_remote_sink( 7246 aconnector->dc_link, 7247 (uint8_t *)edid, 7248 (edid->extensions + 1) * EDID_LENGTH, 7249 &init_params); 7250 7251 if (aconnector->base.force == DRM_FORCE_ON) { 7252 aconnector->dc_sink = aconnector->dc_link->local_sink ? 
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
		if (aconnector->dc_sink)
			dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of a headless boot with force on for a DP managed
	 * connector, these settings have to be != 0 to get an initial
	 * modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	create_eml_sink(aconnector);
}

static enum dc_status dm_validate_stream_and_context(struct dc *dc,
						     struct dc_stream_state *stream)
{
	enum dc_status dc_result = DC_ERROR_UNEXPECTED;
	struct dc_plane_state *dc_plane_state = NULL;
	struct dc_state *dc_state = NULL;

	if (!stream)
		goto cleanup;

	dc_plane_state = dc_create_plane_state(dc);
	if (!dc_plane_state)
		goto cleanup;

	dc_state = dc_state_create(dc, NULL);
	if (!dc_state)
		goto cleanup;

	/* populate stream to plane */
	dc_plane_state->src_rect.height = stream->src.height;
	dc_plane_state->src_rect.width = stream->src.width;
	dc_plane_state->dst_rect.height = stream->src.height;
	dc_plane_state->dst_rect.width = stream->src.width;
	dc_plane_state->clip_rect.height = stream->src.height;
	dc_plane_state->clip_rect.width = stream->src.width;
	dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
	dc_plane_state->plane_size.surface_size.height = stream->src.height;
	dc_plane_state->plane_size.surface_size.width = stream->src.width;
	dc_plane_state->plane_size.chroma_size.height = stream->src.height;
	dc_plane_state->plane_size.chroma_size.width = stream->src.width;
	dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
	dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
	dc_plane_state->rotation = ROTATION_ANGLE_0;
	dc_plane_state->is_tiling_rotated = false;
	dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;

	dc_result = dc_validate_stream(dc, stream);
	if (dc_result == DC_OK)
		dc_result = dc_validate_plane(dc, dc_plane_state);

	if (dc_result == DC_OK)
		dc_result = dc_state_add_stream(dc, dc_state, stream);

	if (dc_result == DC_OK && !dc_state_add_plane(
						dc,
						stream,
						dc_plane_state,
						dc_state))
		dc_result = DC_FAIL_ATTACH_SURFACES;

	if (dc_result == DC_OK)
		dc_result = dc_validate_global_state(dc, dc_state, true);

cleanup:
	if (dc_state)
		dc_state_release(dc_state);

	if (dc_plane_state)
		dc_plane_state_release(dc_plane_state);

	return dc_result;
}

struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ?
		drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;
	uint8_t bpc_limit = 6;

	if (!dm_state)
		return NULL;

	if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
	    aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
		bpc_limit = 8;

	do {
		stream = create_stream_for_sink(connector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			return stream;

		dc_result = dc_validate_stream(adev->dm.dc, stream);
		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);

		if (dc_result == DC_OK)
			dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_pixel_encoding_to_str(stream->timing.pixel_encoding),
				      dc_color_depth_to_str(stream->timing.display_color_depth),
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= bpc_limit);

	if ((dc_result == DC_FAIL_ENC_VALIDATE ||
	     dc_result == DC_EXCEED_DONGLE_CAP) &&
	    !aconnector->force_yuv420_output) {
		DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
			      __func__, __LINE__);

		aconnector->force_yuv420_output = true;
		stream = create_validate_stream_for_sink(aconnector, drm_mode,
							 dm_state, old_stream);
		aconnector->force_yuv420_output = false;
	}

	return stream;
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
	    aconnector->base.force != DRM_FORCE_ON) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	drm_mode_set_crtcinfo(mode, 0);

	stream = create_validate_stream_for_sink(aconnector, mode,
						 to_dm_connector_state(connector->state),
						 NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}

static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));
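	/*
	 * Pack the HDR static metadata into a Dynamic Range and Mastering
	 * (DRM) infoframe: a 30-byte buffer holding the 4-byte header plus
	 * the fixed 26-byte payload. The header is then rewritten below for
	 * the transport in use: HDMI keeps the infoframe header with its
	 * checksum in sb[0], while DP/eDP wraps the same payload in an SDP
	 * header.
	 */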
7463 7464 if (!state->hdr_output_metadata) 7465 return 0; 7466 7467 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state); 7468 if (ret) 7469 return ret; 7470 7471 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf)); 7472 if (len < 0) 7473 return (int)len; 7474 7475 /* Static metadata is a fixed 26 bytes + 4 byte header. */ 7476 if (len != 30) 7477 return -EINVAL; 7478 7479 /* Prepare the infopacket for DC. */ 7480 switch (state->connector->connector_type) { 7481 case DRM_MODE_CONNECTOR_HDMIA: 7482 out->hb0 = 0x87; /* type */ 7483 out->hb1 = 0x01; /* version */ 7484 out->hb2 = 0x1A; /* length */ 7485 out->sb[0] = buf[3]; /* checksum */ 7486 i = 1; 7487 break; 7488 7489 case DRM_MODE_CONNECTOR_DisplayPort: 7490 case DRM_MODE_CONNECTOR_eDP: 7491 out->hb0 = 0x00; /* sdp id, zero */ 7492 out->hb1 = 0x87; /* type */ 7493 out->hb2 = 0x1D; /* payload len - 1 */ 7494 out->hb3 = (0x13 << 2); /* sdp version */ 7495 out->sb[0] = 0x01; /* version */ 7496 out->sb[1] = 0x1A; /* length */ 7497 i = 2; 7498 break; 7499 7500 default: 7501 return -EINVAL; 7502 } 7503 7504 memcpy(&out->sb[i], &buf[4], 26); 7505 out->valid = true; 7506 7507 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, 7508 sizeof(out->sb), false); 7509 7510 return 0; 7511 } 7512 7513 static int 7514 amdgpu_dm_connector_atomic_check(struct drm_connector *conn, 7515 struct drm_atomic_state *state) 7516 { 7517 struct drm_connector_state *new_con_state = 7518 drm_atomic_get_new_connector_state(state, conn); 7519 struct drm_connector_state *old_con_state = 7520 drm_atomic_get_old_connector_state(state, conn); 7521 struct drm_crtc *crtc = new_con_state->crtc; 7522 struct drm_crtc_state *new_crtc_state; 7523 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); 7524 int ret; 7525 7526 trace_amdgpu_dm_connector_atomic_check(new_con_state); 7527 7528 if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 7529 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr); 7530 if (ret < 0) 7531 return ret; 7532 } 7533 7534 if (!crtc) 7535 return 0; 7536 7537 if (new_con_state->colorspace != old_con_state->colorspace) { 7538 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7539 if (IS_ERR(new_crtc_state)) 7540 return PTR_ERR(new_crtc_state); 7541 7542 new_crtc_state->mode_changed = true; 7543 } 7544 7545 if (new_con_state->content_type != old_con_state->content_type) { 7546 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7547 if (IS_ERR(new_crtc_state)) 7548 return PTR_ERR(new_crtc_state); 7549 7550 new_crtc_state->mode_changed = true; 7551 } 7552 7553 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { 7554 struct dc_info_packet hdr_infopacket; 7555 7556 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); 7557 if (ret) 7558 return ret; 7559 7560 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7561 if (IS_ERR(new_crtc_state)) 7562 return PTR_ERR(new_crtc_state); 7563 7564 /* 7565 * DC considers the stream backends changed if the 7566 * static metadata changes. Forcing the modeset also 7567 * gives a simple way for userspace to switch from 7568 * 8bpc to 10bpc when setting the metadata to enter 7569 * or exit HDR. 7570 * 7571 * Changing the static metadata after it's been 7572 * set is permissible, however. So only force a 7573 * modeset if we're entering or exiting HDR. 
		 */
		new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If a second, larger display is hotplugged in fbcon mode, its
	 * higher-resolution modes are filtered out by
	 * drm_mode_validate_size() and would be missing once the user
	 * starts lightdm. The get_modes callback therefore needs to
	 * rebuild the mode list, not just return the mode count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}

int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}

static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	struct drm_dp_mst_topology_state *mst_state;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->mst_output_port)
		return 0;

	mst_port = aconnector->mst_output_port;
	mst_mgr = &aconnector->mst_root->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
	}

	dm_new_connector_state->vcpi_slots =
		drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
					      dm_new_connector_state->pbn);
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check =
dm_encoder_helper_atomic_check 7679 }; 7680 7681 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, 7682 struct dc_state *dc_state, 7683 struct dsc_mst_fairness_vars *vars) 7684 { 7685 struct dc_stream_state *stream = NULL; 7686 struct drm_connector *connector; 7687 struct drm_connector_state *new_con_state; 7688 struct amdgpu_dm_connector *aconnector; 7689 struct dm_connector_state *dm_conn_state; 7690 int i, j, ret; 7691 int vcpi, pbn_div, pbn = 0, slot_num = 0; 7692 7693 for_each_new_connector_in_state(state, connector, new_con_state, i) { 7694 7695 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 7696 continue; 7697 7698 aconnector = to_amdgpu_dm_connector(connector); 7699 7700 if (!aconnector->mst_output_port) 7701 continue; 7702 7703 if (!new_con_state || !new_con_state->crtc) 7704 continue; 7705 7706 dm_conn_state = to_dm_connector_state(new_con_state); 7707 7708 for (j = 0; j < dc_state->stream_count; j++) { 7709 stream = dc_state->streams[j]; 7710 if (!stream) 7711 continue; 7712 7713 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector) 7714 break; 7715 7716 stream = NULL; 7717 } 7718 7719 if (!stream) 7720 continue; 7721 7722 pbn_div = dm_mst_get_pbn_divider(stream->link); 7723 /* pbn is calculated by compute_mst_dsc_configs_for_state*/ 7724 for (j = 0; j < dc_state->stream_count; j++) { 7725 if (vars[j].aconnector == aconnector) { 7726 pbn = vars[j].pbn; 7727 break; 7728 } 7729 } 7730 7731 if (j == dc_state->stream_count || pbn_div == 0) 7732 continue; 7733 7734 slot_num = DIV_ROUND_UP(pbn, pbn_div); 7735 7736 if (stream->timing.flags.DSC != 1) { 7737 dm_conn_state->pbn = pbn; 7738 dm_conn_state->vcpi_slots = slot_num; 7739 7740 ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, 7741 dm_conn_state->pbn, false); 7742 if (ret < 0) 7743 return ret; 7744 7745 continue; 7746 } 7747 7748 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true); 7749 if (vcpi < 0) 7750 return vcpi; 7751 7752 dm_conn_state->pbn = pbn; 7753 dm_conn_state->vcpi_slots = vcpi; 7754 } 7755 return 0; 7756 } 7757 7758 static int to_drm_connector_type(enum signal_type st) 7759 { 7760 switch (st) { 7761 case SIGNAL_TYPE_HDMI_TYPE_A: 7762 return DRM_MODE_CONNECTOR_HDMIA; 7763 case SIGNAL_TYPE_EDP: 7764 return DRM_MODE_CONNECTOR_eDP; 7765 case SIGNAL_TYPE_LVDS: 7766 return DRM_MODE_CONNECTOR_LVDS; 7767 case SIGNAL_TYPE_RGB: 7768 return DRM_MODE_CONNECTOR_VGA; 7769 case SIGNAL_TYPE_DISPLAY_PORT: 7770 case SIGNAL_TYPE_DISPLAY_PORT_MST: 7771 return DRM_MODE_CONNECTOR_DisplayPort; 7772 case SIGNAL_TYPE_DVI_DUAL_LINK: 7773 case SIGNAL_TYPE_DVI_SINGLE_LINK: 7774 return DRM_MODE_CONNECTOR_DVID; 7775 case SIGNAL_TYPE_VIRTUAL: 7776 return DRM_MODE_CONNECTOR_VIRTUAL; 7777 7778 default: 7779 return DRM_MODE_CONNECTOR_Unknown; 7780 } 7781 } 7782 7783 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) 7784 { 7785 struct drm_encoder *encoder; 7786 7787 /* There is only one encoder per connector */ 7788 drm_connector_for_each_possible_encoder(connector, encoder) 7789 return encoder; 7790 7791 return NULL; 7792 } 7793 7794 static void amdgpu_dm_get_native_mode(struct drm_connector *connector) 7795 { 7796 struct drm_encoder *encoder; 7797 struct amdgpu_encoder *amdgpu_encoder; 7798 7799 encoder = amdgpu_dm_connector_to_encoder(connector); 7800 7801 if (encoder == NULL) 7802 return; 7803 7804 amdgpu_encoder = to_amdgpu_encoder(encoder); 7805 7806 amdgpu_encoder->native_mode.clock = 0; 
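	/*
	 * Adopt the connector's preferred mode as the encoder's native mode.
	 * Note that the loop below breaks unconditionally after its first
	 * iteration, so only the head of the probed list is examined; the
	 * caller is expected to have sorted that list first (see
	 * amdgpu_dm_connector_ddc_get_modes()).
	 */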
7807 7808 if (!list_empty(&connector->probed_modes)) { 7809 struct drm_display_mode *preferred_mode = NULL; 7810 7811 list_for_each_entry(preferred_mode, 7812 &connector->probed_modes, 7813 head) { 7814 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) 7815 amdgpu_encoder->native_mode = *preferred_mode; 7816 7817 break; 7818 } 7819 7820 } 7821 } 7822 7823 static struct drm_display_mode * 7824 amdgpu_dm_create_common_mode(struct drm_encoder *encoder, 7825 char *name, 7826 int hdisplay, int vdisplay) 7827 { 7828 struct drm_device *dev = encoder->dev; 7829 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7830 struct drm_display_mode *mode = NULL; 7831 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7832 7833 mode = drm_mode_duplicate(dev, native_mode); 7834 7835 if (mode == NULL) 7836 return NULL; 7837 7838 mode->hdisplay = hdisplay; 7839 mode->vdisplay = vdisplay; 7840 mode->type &= ~DRM_MODE_TYPE_PREFERRED; 7841 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); 7842 7843 return mode; 7844 7845 } 7846 7847 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, 7848 struct drm_connector *connector) 7849 { 7850 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7851 struct drm_display_mode *mode = NULL; 7852 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7853 struct amdgpu_dm_connector *amdgpu_dm_connector = 7854 to_amdgpu_dm_connector(connector); 7855 int i; 7856 int n; 7857 struct mode_size { 7858 char name[DRM_DISPLAY_MODE_LEN]; 7859 int w; 7860 int h; 7861 } common_modes[] = { 7862 { "640x480", 640, 480}, 7863 { "800x600", 800, 600}, 7864 { "1024x768", 1024, 768}, 7865 { "1280x720", 1280, 720}, 7866 { "1280x800", 1280, 800}, 7867 {"1280x1024", 1280, 1024}, 7868 { "1440x900", 1440, 900}, 7869 {"1680x1050", 1680, 1050}, 7870 {"1600x1200", 1600, 1200}, 7871 {"1920x1080", 1920, 1080}, 7872 {"1920x1200", 1920, 1200} 7873 }; 7874 7875 n = ARRAY_SIZE(common_modes); 7876 7877 for (i = 0; i < n; i++) { 7878 struct drm_display_mode *curmode = NULL; 7879 bool mode_existed = false; 7880 7881 if (common_modes[i].w > native_mode->hdisplay || 7882 common_modes[i].h > native_mode->vdisplay || 7883 (common_modes[i].w == native_mode->hdisplay && 7884 common_modes[i].h == native_mode->vdisplay)) 7885 continue; 7886 7887 list_for_each_entry(curmode, &connector->probed_modes, head) { 7888 if (common_modes[i].w == curmode->hdisplay && 7889 common_modes[i].h == curmode->vdisplay) { 7890 mode_existed = true; 7891 break; 7892 } 7893 } 7894 7895 if (mode_existed) 7896 continue; 7897 7898 mode = amdgpu_dm_create_common_mode(encoder, 7899 common_modes[i].name, common_modes[i].w, 7900 common_modes[i].h); 7901 if (!mode) 7902 continue; 7903 7904 drm_mode_probed_add(connector, mode); 7905 amdgpu_dm_connector->num_modes++; 7906 } 7907 } 7908 7909 static void amdgpu_set_panel_orientation(struct drm_connector *connector) 7910 { 7911 struct drm_encoder *encoder; 7912 struct amdgpu_encoder *amdgpu_encoder; 7913 const struct drm_display_mode *native_mode; 7914 7915 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && 7916 connector->connector_type != DRM_MODE_CONNECTOR_LVDS) 7917 return; 7918 7919 mutex_lock(&connector->dev->mode_config.mutex); 7920 amdgpu_dm_connector_get_modes(connector); 7921 mutex_unlock(&connector->dev->mode_config.mutex); 7922 7923 encoder = amdgpu_dm_connector_to_encoder(connector); 7924 if (!encoder) 7925 return; 7926 7927 amdgpu_encoder = to_amdgpu_encoder(encoder); 7928 7929 native_mode = 
&amdgpu_encoder->native_mode; 7930 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) 7931 return; 7932 7933 drm_connector_set_panel_orientation_with_quirk(connector, 7934 DRM_MODE_PANEL_ORIENTATION_UNKNOWN, 7935 native_mode->hdisplay, 7936 native_mode->vdisplay); 7937 } 7938 7939 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, 7940 const struct drm_edid *drm_edid) 7941 { 7942 struct amdgpu_dm_connector *amdgpu_dm_connector = 7943 to_amdgpu_dm_connector(connector); 7944 7945 if (drm_edid) { 7946 /* empty probed_modes */ 7947 INIT_LIST_HEAD(&connector->probed_modes); 7948 amdgpu_dm_connector->num_modes = 7949 drm_edid_connector_add_modes(connector); 7950 7951 /* sorting the probed modes before calling function 7952 * amdgpu_dm_get_native_mode() since EDID can have 7953 * more than one preferred mode. The modes that are 7954 * later in the probed mode list could be of higher 7955 * and preferred resolution. For example, 3840x2160 7956 * resolution in base EDID preferred timing and 4096x2160 7957 * preferred resolution in DID extension block later. 7958 */ 7959 drm_mode_sort(&connector->probed_modes); 7960 amdgpu_dm_get_native_mode(connector); 7961 7962 /* Freesync capabilities are reset by calling 7963 * drm_edid_connector_add_modes() and need to be 7964 * restored here. 7965 */ 7966 amdgpu_dm_update_freesync_caps(connector, drm_edid); 7967 } else { 7968 amdgpu_dm_connector->num_modes = 0; 7969 } 7970 } 7971 7972 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, 7973 struct drm_display_mode *mode) 7974 { 7975 struct drm_display_mode *m; 7976 7977 list_for_each_entry(m, &aconnector->base.probed_modes, head) { 7978 if (drm_mode_equal(m, mode)) 7979 return true; 7980 } 7981 7982 return false; 7983 } 7984 7985 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) 7986 { 7987 const struct drm_display_mode *m; 7988 struct drm_display_mode *new_mode; 7989 uint i; 7990 u32 new_modes_count = 0; 7991 7992 /* Standard FPS values 7993 * 7994 * 23.976 - TV/NTSC 7995 * 24 - Cinema 7996 * 25 - TV/PAL 7997 * 29.97 - TV/NTSC 7998 * 30 - TV/NTSC 7999 * 48 - Cinema HFR 8000 * 50 - TV/PAL 8001 * 60 - Commonly used 8002 * 48,72,96,120 - Multiples of 24 8003 */ 8004 static const u32 common_rates[] = { 8005 23976, 24000, 25000, 29970, 30000, 8006 48000, 50000, 60000, 72000, 96000, 120000 8007 }; 8008 8009 /* 8010 * Find mode with highest refresh rate with the same resolution 8011 * as the preferred mode. Some monitors report a preferred mode 8012 * with lower resolution than the highest refresh rate supported. 
8013 */ 8014 8015 m = get_highest_refresh_rate_mode(aconnector, true); 8016 if (!m) 8017 return 0; 8018 8019 for (i = 0; i < ARRAY_SIZE(common_rates); i++) { 8020 u64 target_vtotal, target_vtotal_diff; 8021 u64 num, den; 8022 8023 if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) 8024 continue; 8025 8026 if (common_rates[i] < aconnector->min_vfreq * 1000 || 8027 common_rates[i] > aconnector->max_vfreq * 1000) 8028 continue; 8029 8030 num = (unsigned long long)m->clock * 1000 * 1000; 8031 den = common_rates[i] * (unsigned long long)m->htotal; 8032 target_vtotal = div_u64(num, den); 8033 target_vtotal_diff = target_vtotal - m->vtotal; 8034 8035 /* Check for illegal modes */ 8036 if (m->vsync_start + target_vtotal_diff < m->vdisplay || 8037 m->vsync_end + target_vtotal_diff < m->vsync_start || 8038 m->vtotal + target_vtotal_diff < m->vsync_end) 8039 continue; 8040 8041 new_mode = drm_mode_duplicate(aconnector->base.dev, m); 8042 if (!new_mode) 8043 goto out; 8044 8045 new_mode->vtotal += (u16)target_vtotal_diff; 8046 new_mode->vsync_start += (u16)target_vtotal_diff; 8047 new_mode->vsync_end += (u16)target_vtotal_diff; 8048 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; 8049 new_mode->type |= DRM_MODE_TYPE_DRIVER; 8050 8051 if (!is_duplicate_mode(aconnector, new_mode)) { 8052 drm_mode_probed_add(&aconnector->base, new_mode); 8053 new_modes_count += 1; 8054 } else 8055 drm_mode_destroy(aconnector->base.dev, new_mode); 8056 } 8057 out: 8058 return new_modes_count; 8059 } 8060 8061 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, 8062 const struct drm_edid *drm_edid) 8063 { 8064 struct amdgpu_dm_connector *amdgpu_dm_connector = 8065 to_amdgpu_dm_connector(connector); 8066 8067 if (!(amdgpu_freesync_vid_mode && drm_edid)) 8068 return; 8069 8070 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 8071 amdgpu_dm_connector->num_modes += 8072 add_fs_modes(amdgpu_dm_connector); 8073 } 8074 8075 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) 8076 { 8077 struct amdgpu_dm_connector *amdgpu_dm_connector = 8078 to_amdgpu_dm_connector(connector); 8079 struct drm_encoder *encoder; 8080 const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid; 8081 struct dc_link_settings *verified_link_cap = 8082 &amdgpu_dm_connector->dc_link->verified_link_cap; 8083 const struct dc *dc = amdgpu_dm_connector->dc_link->dc; 8084 8085 encoder = amdgpu_dm_connector_to_encoder(connector); 8086 8087 if (!drm_edid) { 8088 amdgpu_dm_connector->num_modes = 8089 drm_add_modes_noedid(connector, 640, 480); 8090 if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) 8091 amdgpu_dm_connector->num_modes += 8092 drm_add_modes_noedid(connector, 1920, 1080); 8093 } else { 8094 amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); 8095 if (encoder) 8096 amdgpu_dm_connector_add_common_modes(encoder, connector); 8097 amdgpu_dm_connector_add_freesync_modes(connector, drm_edid); 8098 } 8099 amdgpu_dm_fbc_init(connector); 8100 8101 return amdgpu_dm_connector->num_modes; 8102 } 8103 8104 static const u32 supported_colorspaces = 8105 BIT(DRM_MODE_COLORIMETRY_BT709_YCC) | 8106 BIT(DRM_MODE_COLORIMETRY_OPRGB) | 8107 BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) | 8108 BIT(DRM_MODE_COLORIMETRY_BT2020_YCC); 8109 8110 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, 8111 struct amdgpu_dm_connector *aconnector, 8112 int connector_type, 8113 struct dc_link *link, 8114 int link_index) 8115 { 8116 struct amdgpu_device *adev = 
drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->bl_idx = -1;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	aconnector->pack_sdp_v1_3 = false;
	aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
	memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
	mutex_init(&aconnector->hpd_lock);
	mutex_init(&aconnector->handle_mst_msg_ready);

	/*
	 * Configure HPD hot-plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		link->link_enc = link_enc_cfg_get_link_enc(link);
		ASSERT(link->link_enc);
		if (link->link_enc)
			aconnector->base.ycbcr_420_allowed =
				link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				   dm->ddev->mode_config.scaling_mode_property,
				   DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_property,
				   UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_hborder_property,
				   0);
	drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_vborder_property,
				   0);

	if (!aconnector->mst_root)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	aconnector->base.state->max_bpc = 16;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
		/* Content Type is currently only implemented for HDMI.
*/ 8187 drm_connector_attach_content_type_property(&aconnector->base); 8188 } 8189 8190 if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { 8191 if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces)) 8192 drm_connector_attach_colorspace_property(&aconnector->base); 8193 } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) || 8194 connector_type == DRM_MODE_CONNECTOR_eDP) { 8195 if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces)) 8196 drm_connector_attach_colorspace_property(&aconnector->base); 8197 } 8198 8199 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 8200 connector_type == DRM_MODE_CONNECTOR_DisplayPort || 8201 connector_type == DRM_MODE_CONNECTOR_eDP) { 8202 drm_connector_attach_hdr_output_metadata_property(&aconnector->base); 8203 8204 if (!aconnector->mst_root) 8205 drm_connector_attach_vrr_capable_property(&aconnector->base); 8206 8207 if (adev->dm.hdcp_workqueue) 8208 drm_connector_attach_content_protection_property(&aconnector->base, true); 8209 } 8210 } 8211 8212 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 8213 struct i2c_msg *msgs, int num) 8214 { 8215 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 8216 struct ddc_service *ddc_service = i2c->ddc_service; 8217 struct i2c_command cmd; 8218 int i; 8219 int result = -EIO; 8220 8221 if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) 8222 return result; 8223 8224 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); 8225 8226 if (!cmd.payloads) 8227 return result; 8228 8229 cmd.number_of_payloads = num; 8230 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 8231 cmd.speed = 100; 8232 8233 for (i = 0; i < num; i++) { 8234 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 8235 cmd.payloads[i].address = msgs[i].addr; 8236 cmd.payloads[i].length = msgs[i].len; 8237 cmd.payloads[i].data = msgs[i].buf; 8238 } 8239 8240 if (dc_submit_i2c( 8241 ddc_service->ctx->dc, 8242 ddc_service->link->link_index, 8243 &cmd)) 8244 result = num; 8245 8246 kfree(cmd.payloads); 8247 return result; 8248 } 8249 8250 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 8251 { 8252 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 8253 } 8254 8255 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 8256 .master_xfer = amdgpu_dm_i2c_xfer, 8257 .functionality = amdgpu_dm_i2c_func, 8258 }; 8259 8260 static struct amdgpu_i2c_adapter * 8261 create_i2c(struct ddc_service *ddc_service, 8262 int link_index, 8263 int *res) 8264 { 8265 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 8266 struct amdgpu_i2c_adapter *i2c; 8267 8268 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); 8269 if (!i2c) 8270 return NULL; 8271 i2c->base.owner = THIS_MODULE; 8272 i2c->base.dev.parent = &adev->pdev->dev; 8273 i2c->base.algo = &amdgpu_dm_i2c_algo; 8274 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); 8275 i2c_set_adapdata(&i2c->base, i2c); 8276 i2c->ddc_service = ddc_service; 8277 8278 return i2c; 8279 } 8280 8281 8282 /* 8283 * Note: this function assumes that dc_link_detect() was called for the 8284 * dc_link which will be represented by this aconnector. 
8285 */ 8286 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 8287 struct amdgpu_dm_connector *aconnector, 8288 u32 link_index, 8289 struct amdgpu_encoder *aencoder) 8290 { 8291 int res = 0; 8292 int connector_type; 8293 struct dc *dc = dm->dc; 8294 struct dc_link *link = dc_get_link_at_index(dc, link_index); 8295 struct amdgpu_i2c_adapter *i2c; 8296 8297 /* Not needed for writeback connector */ 8298 link->priv = aconnector; 8299 8300 8301 i2c = create_i2c(link->ddc, link->link_index, &res); 8302 if (!i2c) { 8303 DRM_ERROR("Failed to create i2c adapter data\n"); 8304 return -ENOMEM; 8305 } 8306 8307 aconnector->i2c = i2c; 8308 res = i2c_add_adapter(&i2c->base); 8309 8310 if (res) { 8311 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); 8312 goto out_free; 8313 } 8314 8315 connector_type = to_drm_connector_type(link->connector_signal); 8316 8317 res = drm_connector_init_with_ddc( 8318 dm->ddev, 8319 &aconnector->base, 8320 &amdgpu_dm_connector_funcs, 8321 connector_type, 8322 &i2c->base); 8323 8324 if (res) { 8325 DRM_ERROR("connector_init failed\n"); 8326 aconnector->connector_id = -1; 8327 goto out_free; 8328 } 8329 8330 drm_connector_helper_add( 8331 &aconnector->base, 8332 &amdgpu_dm_connector_helper_funcs); 8333 8334 amdgpu_dm_connector_init_helper( 8335 dm, 8336 aconnector, 8337 connector_type, 8338 link, 8339 link_index); 8340 8341 drm_connector_attach_encoder( 8342 &aconnector->base, &aencoder->base); 8343 8344 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort 8345 || connector_type == DRM_MODE_CONNECTOR_eDP) 8346 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); 8347 8348 out_free: 8349 if (res) { 8350 kfree(i2c); 8351 aconnector->i2c = NULL; 8352 } 8353 return res; 8354 } 8355 8356 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 8357 { 8358 switch (adev->mode_info.num_crtc) { 8359 case 1: 8360 return 0x1; 8361 case 2: 8362 return 0x3; 8363 case 3: 8364 return 0x7; 8365 case 4: 8366 return 0xf; 8367 case 5: 8368 return 0x1f; 8369 case 6: 8370 default: 8371 return 0x3f; 8372 } 8373 } 8374 8375 static int amdgpu_dm_encoder_init(struct drm_device *dev, 8376 struct amdgpu_encoder *aencoder, 8377 uint32_t link_index) 8378 { 8379 struct amdgpu_device *adev = drm_to_adev(dev); 8380 8381 int res = drm_encoder_init(dev, 8382 &aencoder->base, 8383 &amdgpu_dm_encoder_funcs, 8384 DRM_MODE_ENCODER_TMDS, 8385 NULL); 8386 8387 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 8388 8389 if (!res) 8390 aencoder->encoder_id = link_index; 8391 else 8392 aencoder->encoder_id = -1; 8393 8394 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 8395 8396 return res; 8397 } 8398 8399 static void manage_dm_interrupts(struct amdgpu_device *adev, 8400 struct amdgpu_crtc *acrtc, 8401 struct dm_crtc_state *acrtc_state) 8402 { 8403 struct drm_vblank_crtc_config config = {0}; 8404 struct dc_crtc_timing *timing; 8405 int offdelay; 8406 8407 if (acrtc_state) { 8408 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < 8409 IP_VERSION(3, 5, 0) || 8410 acrtc_state->stream->link->psr_settings.psr_version < 8411 DC_PSR_VERSION_UNSUPPORTED || 8412 !(adev->flags & AMD_IS_APU)) { 8413 timing = &acrtc_state->stream->timing; 8414 8415 /* at least 2 frames */ 8416 offdelay = DIV64_U64_ROUND_UP((u64)20 * 8417 timing->v_total * 8418 timing->h_total, 8419 timing->pix_clk_100hz); 8420 8421 config.offdelay_ms = offdelay ?: 30; 8422 } else { 8423 config.disable_immediate = true; 8424 } 8425 8426 
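		/*
		 * Enable vblank with the config computed above: a disable
		 * off-delay of at least two frames on pre-DCN3.5 hardware,
		 * PSR-capable links, and dGPUs, or immediate disable for the
		 * remaining case (a DCN3.5+ APU without PSR support).
		 */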
drm_crtc_vblank_on_config(&acrtc->base,
					  &config);
	} else {
		drm_crtc_vblank_off(&acrtc->base);
	}
}

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * This reads the current state for the IRQ and forcibly reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
					    struct drm_crtc_state *old_crtc_state,
					    struct drm_connector_state *new_conn_state,
					    struct drm_connector_state *old_conn_state,
					    const struct drm_connector *connector,
					    struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
		 connector->index, connector->status, connector->dpms);
	pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
		 old_conn_state->content_protection, new_conn_state->content_protection);

	if (old_crtc_state)
		pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
			 old_crtc_state->enable,
			 old_crtc_state->active,
			 old_crtc_state->mode_changed,
			 old_crtc_state->active_changed,
			 old_crtc_state->connectors_changed);

	if (new_crtc_state)
		pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
			 new_crtc_state->enable,
			 new_crtc_state->active,
			 new_crtc_state->mode_changed,
			 new_crtc_state->active_changed,
			 new_crtc_state->connectors_changed);

	/* hdcp content type change */
	if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
	    new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
		return true;
	}

	/* CP is being re-enabled, ignore this */
	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		if (new_crtc_state && new_crtc_state->mode_changed) {
			new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
			return true;
		}
		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
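		/*
		 * No mode change: HDCP stays active on this stream, so
		 * restore ENABLED and report no difference; the caller can
		 * then skip re-running HDCP setup for this connector.
		 */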
pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); 8513 return false; 8514 } 8515 8516 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED 8517 * 8518 * Handles: UNDESIRED -> ENABLED 8519 */ 8520 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 8521 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 8522 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8523 8524 /* Stream removed and re-enabled 8525 * 8526 * Can sometimes overlap with the HPD case, 8527 * thus set update_hdcp to false to avoid 8528 * setting HDCP multiple times. 8529 * 8530 * Handles: DESIRED -> DESIRED (Special case) 8531 */ 8532 if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && 8533 new_conn_state->crtc && new_conn_state->crtc->enabled && 8534 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8535 dm_con_state->update_hdcp = false; 8536 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", 8537 __func__); 8538 return true; 8539 } 8540 8541 /* Hot-plug, headless s3, dpms 8542 * 8543 * Only start HDCP if the display is connected/enabled. 8544 * update_hdcp flag will be set to false until the next 8545 * HPD comes in. 8546 * 8547 * Handles: DESIRED -> DESIRED (Special case) 8548 */ 8549 if (dm_con_state->update_hdcp && 8550 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 8551 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 8552 dm_con_state->update_hdcp = false; 8553 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", 8554 __func__); 8555 return true; 8556 } 8557 8558 if (old_conn_state->content_protection == new_conn_state->content_protection) { 8559 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8560 if (new_crtc_state && new_crtc_state->mode_changed) { 8561 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", 8562 __func__); 8563 return true; 8564 } 8565 pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", 8566 __func__); 8567 return false; 8568 } 8569 8570 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); 8571 return false; 8572 } 8573 8574 if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { 8575 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", 8576 __func__); 8577 return true; 8578 } 8579 8580 pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); 8581 return false; 8582 } 8583 8584 static void remove_stream(struct amdgpu_device *adev, 8585 struct amdgpu_crtc *acrtc, 8586 struct dc_stream_state *stream) 8587 { 8588 /* this is the update mode case */ 8589 8590 acrtc->otg_inst = -1; 8591 acrtc->enabled = false; 8592 } 8593 8594 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 8595 { 8596 8597 assert_spin_locked(&acrtc->base.dev->event_lock); 8598 WARN_ON(acrtc->event); 8599 8600 acrtc->event = acrtc->base.state->event; 8601 8602 /* Set the flip status */ 8603 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 8604 8605 /* Mark this event as consumed */ 8606 acrtc->base.state->event = NULL; 8607 8608 drm_dbg_state(acrtc->base.dev, 8609 "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 8610 acrtc->crtc_id); 8611 } 8612 8613 static void update_freesync_state_on_stream( 8614 struct amdgpu_display_manager *dm, 8615 struct dm_crtc_state *new_crtc_state, 8616 struct 
dc_stream_state *new_stream, 8617 struct dc_plane_state *surface, 8618 u32 flip_timestamp_in_us) 8619 { 8620 struct mod_vrr_params vrr_params; 8621 struct dc_info_packet vrr_infopacket = {0}; 8622 struct amdgpu_device *adev = dm->adev; 8623 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 8624 unsigned long flags; 8625 bool pack_sdp_v1_3 = false; 8626 struct amdgpu_dm_connector *aconn; 8627 enum vrr_packet_type packet_type = PACKET_TYPE_VRR; 8628 8629 if (!new_stream) 8630 return; 8631 8632 /* 8633 * TODO: Determine why min/max totals and vrefresh can be 0 here. 8634 * For now it's sufficient to just guard against these conditions. 8635 */ 8636 8637 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 8638 return; 8639 8640 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8641 vrr_params = acrtc->dm_irq_params.vrr_params; 8642 8643 if (surface) { 8644 mod_freesync_handle_preflip( 8645 dm->freesync_module, 8646 surface, 8647 new_stream, 8648 flip_timestamp_in_us, 8649 &vrr_params); 8650 8651 if (adev->family < AMDGPU_FAMILY_AI && 8652 amdgpu_dm_crtc_vrr_active(new_crtc_state)) { 8653 mod_freesync_handle_v_update(dm->freesync_module, 8654 new_stream, &vrr_params); 8655 8656 /* Need to call this before the frame ends. */ 8657 dc_stream_adjust_vmin_vmax(dm->dc, 8658 new_crtc_state->stream, 8659 &vrr_params.adjust); 8660 } 8661 } 8662 8663 aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; 8664 8665 if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) { 8666 pack_sdp_v1_3 = aconn->pack_sdp_v1_3; 8667 8668 if (aconn->vsdb_info.amd_vsdb_version == 1) 8669 packet_type = PACKET_TYPE_FS_V1; 8670 else if (aconn->vsdb_info.amd_vsdb_version == 2) 8671 packet_type = PACKET_TYPE_FS_V2; 8672 else if (aconn->vsdb_info.amd_vsdb_version == 3) 8673 packet_type = PACKET_TYPE_FS_V3; 8674 8675 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, 8676 &new_stream->adaptive_sync_infopacket); 8677 } 8678 8679 mod_freesync_build_vrr_infopacket( 8680 dm->freesync_module, 8681 new_stream, 8682 &vrr_params, 8683 packet_type, 8684 TRANSFER_FUNC_UNKNOWN, 8685 &vrr_infopacket, 8686 pack_sdp_v1_3); 8687 8688 new_crtc_state->freesync_vrr_info_changed |= 8689 (memcmp(&new_crtc_state->vrr_infopacket, 8690 &vrr_infopacket, 8691 sizeof(vrr_infopacket)) != 0); 8692 8693 acrtc->dm_irq_params.vrr_params = vrr_params; 8694 new_crtc_state->vrr_infopacket = vrr_infopacket; 8695 8696 new_stream->vrr_infopacket = vrr_infopacket; 8697 new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params); 8698 8699 if (new_crtc_state->freesync_vrr_info_changed) 8700 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", 8701 new_crtc_state->base.crtc->base.id, 8702 (int)new_crtc_state->base.vrr_enabled, 8703 (int)vrr_params.state); 8704 8705 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8706 } 8707 8708 static void update_stream_irq_parameters( 8709 struct amdgpu_display_manager *dm, 8710 struct dm_crtc_state *new_crtc_state) 8711 { 8712 struct dc_stream_state *new_stream = new_crtc_state->stream; 8713 struct mod_vrr_params vrr_params; 8714 struct mod_freesync_config config = new_crtc_state->freesync_config; 8715 struct amdgpu_device *adev = dm->adev; 8716 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 8717 unsigned long flags; 8718 8719 if (!new_stream) 8720 return; 8721 8722 /* 8723 * TODO: Determine why min/max totals and vrefresh can be 0 here. 
8724 * For now it's sufficient to just guard against these conditions. 8725 */ 8726 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 8727 return; 8728 8729 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8730 vrr_params = acrtc->dm_irq_params.vrr_params; 8731 8732 if (new_crtc_state->vrr_supported && 8733 config.min_refresh_in_uhz && 8734 config.max_refresh_in_uhz) { 8735 /* 8736 * if freesync compatible mode was set, config.state will be set 8737 * in atomic check 8738 */ 8739 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && 8740 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || 8741 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { 8742 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; 8743 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; 8744 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; 8745 vrr_params.state = VRR_STATE_ACTIVE_FIXED; 8746 } else { 8747 config.state = new_crtc_state->base.vrr_enabled ? 8748 VRR_STATE_ACTIVE_VARIABLE : 8749 VRR_STATE_INACTIVE; 8750 } 8751 } else { 8752 config.state = VRR_STATE_UNSUPPORTED; 8753 } 8754 8755 mod_freesync_build_vrr_params(dm->freesync_module, 8756 new_stream, 8757 &config, &vrr_params); 8758 8759 new_crtc_state->freesync_config = config; 8760 /* Copy state for access from DM IRQ handler */ 8761 acrtc->dm_irq_params.freesync_config = config; 8762 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes; 8763 acrtc->dm_irq_params.vrr_params = vrr_params; 8764 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8765 } 8766 8767 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, 8768 struct dm_crtc_state *new_state) 8769 { 8770 bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state); 8771 bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state); 8772 8773 if (!old_vrr_active && new_vrr_active) { 8774 /* Transition VRR inactive -> active: 8775 * While VRR is active, we must not disable vblank irq, as a 8776 * reenable after disable would compute bogus vblank/pflip 8777 * timestamps if it likely happened inside display front-porch. 8778 * 8779 * We also need vupdate irq for the actual core vblank handling 8780 * at end of vblank. 8781 */ 8782 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0); 8783 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); 8784 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", 8785 __func__, new_state->base.crtc->base.id); 8786 } else if (old_vrr_active && !new_vrr_active) { 8787 /* Transition VRR active -> inactive: 8788 * Allow vblank irq disable again for fixed refresh rate. 8789 */ 8790 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0); 8791 drm_crtc_vblank_put(new_state->base.crtc); 8792 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", 8793 __func__, new_state->base.crtc->base.id); 8794 } 8795 } 8796 8797 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) 8798 { 8799 struct drm_plane *plane; 8800 struct drm_plane_state *old_plane_state; 8801 int i; 8802 8803 /* 8804 * TODO: Make this per-stream so we don't issue redundant updates for 8805 * commits with multiple streams. 
8806 */ 8807 for_each_old_plane_in_state(state, plane, old_plane_state, i) 8808 if (plane->type == DRM_PLANE_TYPE_CURSOR) 8809 amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state); 8810 } 8811 8812 static inline uint32_t get_mem_type(struct drm_framebuffer *fb) 8813 { 8814 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); 8815 8816 return abo->tbo.resource ? abo->tbo.resource->mem_type : 0; 8817 } 8818 8819 static void amdgpu_dm_update_cursor(struct drm_plane *plane, 8820 struct drm_plane_state *old_plane_state, 8821 struct dc_stream_update *update) 8822 { 8823 struct amdgpu_device *adev = drm_to_adev(plane->dev); 8824 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 8825 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 8826 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 8827 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 8828 uint64_t address = afb ? afb->address : 0; 8829 struct dc_cursor_position position = {0}; 8830 struct dc_cursor_attributes attributes; 8831 int ret; 8832 8833 if (!plane->state->fb && !old_plane_state->fb) 8834 return; 8835 8836 drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n", 8837 amdgpu_crtc->crtc_id, plane->state->crtc_w, 8838 plane->state->crtc_h); 8839 8840 ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); 8841 if (ret) 8842 return; 8843 8844 if (!position.enable) { 8845 /* turn off cursor */ 8846 if (crtc_state && crtc_state->stream) { 8847 dc_stream_set_cursor_position(crtc_state->stream, 8848 &position); 8849 update->cursor_position = &crtc_state->stream->cursor_position; 8850 } 8851 return; 8852 } 8853 8854 amdgpu_crtc->cursor_width = plane->state->crtc_w; 8855 amdgpu_crtc->cursor_height = plane->state->crtc_h; 8856 8857 memset(&attributes, 0, sizeof(attributes)); 8858 attributes.address.high_part = upper_32_bits(address); 8859 attributes.address.low_part = lower_32_bits(address); 8860 attributes.width = plane->state->crtc_w; 8861 attributes.height = plane->state->crtc_h; 8862 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 8863 attributes.rotation_angle = 0; 8864 attributes.attribute_flags.value = 0; 8865 8866 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM 8867 * legacy gamma setup. 
8868 */ 8869 if (crtc_state->cm_is_degamma_srgb && 8870 adev->dm.dc->caps.color.dpp.gamma_corr) 8871 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; 8872 8873 if (afb) 8874 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; 8875 8876 if (crtc_state->stream) { 8877 if (!dc_stream_set_cursor_attributes(crtc_state->stream, 8878 &attributes)) 8879 DRM_ERROR("DC failed to set cursor attributes\n"); 8880 8881 update->cursor_attributes = &crtc_state->stream->cursor_attributes; 8882 8883 if (!dc_stream_set_cursor_position(crtc_state->stream, 8884 &position)) 8885 DRM_ERROR("DC failed to set cursor position\n"); 8886 8887 update->cursor_position = &crtc_state->stream->cursor_position; 8888 } 8889 } 8890 8891 static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach, 8892 const struct dm_crtc_state *acrtc_state, 8893 const u64 current_ts) 8894 { 8895 struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; 8896 struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; 8897 struct amdgpu_dm_connector *aconn = 8898 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; 8899 8900 if (acrtc_state->update_type > UPDATE_TYPE_FAST) { 8901 if (pr->config.replay_supported && !pr->replay_feature_enabled) 8902 amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn); 8903 else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED && 8904 !psr->psr_feature_enabled) 8905 if (!aconn->disallow_edp_enter_psr) 8906 amdgpu_dm_link_setup_psr(acrtc_state->stream); 8907 } 8908 8909 /* Decrement skip count when SR is enabled and we're doing fast updates. */ 8910 if (acrtc_state->update_type == UPDATE_TYPE_FAST && 8911 (psr->psr_feature_enabled || pr->config.replay_supported)) { 8912 if (aconn->sr_skip_count > 0) 8913 aconn->sr_skip_count--; 8914 8915 /* Allow SR when skip count is 0. */ 8916 acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count; 8917 8918 /* 8919 * If sink supports PSR SU/Panel Replay, there is no need to rely on 8920 * a vblank event disable request to enable PSR/RP. PSR SU/RP 8921 * can be enabled immediately once OS demonstrates an 8922 * adequate number of fast atomic commits to notify KMD 8923 * of update events. See `vblank_control_worker()`. 
8924 */ 8925 if (acrtc_attach->dm_irq_params.allow_sr_entry && 8926 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 8927 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && 8928 #endif 8929 (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) { 8930 if (pr->replay_feature_enabled && !pr->replay_allow_active) 8931 amdgpu_dm_replay_enable(acrtc_state->stream, true); 8932 if (psr->psr_version >= DC_PSR_VERSION_SU_1 && 8933 !psr->psr_allow_active && !aconn->disallow_edp_enter_psr) 8934 amdgpu_dm_psr_enable(acrtc_state->stream); 8935 } 8936 } else { 8937 acrtc_attach->dm_irq_params.allow_sr_entry = false; 8938 } 8939 } 8940 8941 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 8942 struct drm_device *dev, 8943 struct amdgpu_display_manager *dm, 8944 struct drm_crtc *pcrtc, 8945 bool wait_for_vblank) 8946 { 8947 u32 i; 8948 u64 timestamp_ns = ktime_get_ns(); 8949 struct drm_plane *plane; 8950 struct drm_plane_state *old_plane_state, *new_plane_state; 8951 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 8952 struct drm_crtc_state *new_pcrtc_state = 8953 drm_atomic_get_new_crtc_state(state, pcrtc); 8954 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 8955 struct dm_crtc_state *dm_old_crtc_state = 8956 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 8957 int planes_count = 0, vpos, hpos; 8958 unsigned long flags; 8959 u32 target_vblank, last_flip_vblank; 8960 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); 8961 bool cursor_update = false; 8962 bool pflip_present = false; 8963 bool dirty_rects_changed = false; 8964 bool updated_planes_and_streams = false; 8965 struct { 8966 struct dc_surface_update surface_updates[MAX_SURFACES]; 8967 struct dc_plane_info plane_infos[MAX_SURFACES]; 8968 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 8969 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 8970 struct dc_stream_update stream_update; 8971 } *bundle; 8972 8973 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 8974 8975 if (!bundle) { 8976 drm_err(dev, "Failed to allocate update bundle\n"); 8977 goto cleanup; 8978 } 8979 8980 /* 8981 * Disable the cursor first if we're disabling all the planes. 8982 * It'll remain on the screen after the planes are re-enabled 8983 * if we don't. 8984 * 8985 * If the cursor is transitioning from native to overlay mode, the 8986 * native cursor needs to be disabled first. 
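	 *
	 * Disabling is done by programming a zeroed dc_cursor_position
	 * (i.e. with position.enable false) on the stream, as in the
	 * native-to-overlay transition handled right below.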
8987 */ 8988 if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE && 8989 dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { 8990 struct dc_cursor_position cursor_position = {0}; 8991 8992 if (!dc_stream_set_cursor_position(acrtc_state->stream, 8993 &cursor_position)) 8994 drm_err(dev, "DC failed to disable native cursor\n"); 8995 8996 bundle->stream_update.cursor_position = 8997 &acrtc_state->stream->cursor_position; 8998 } 8999 9000 if (acrtc_state->active_planes == 0 && 9001 dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) 9002 amdgpu_dm_commit_cursors(state); 9003 9004 /* update planes when needed */ 9005 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 9006 struct drm_crtc *crtc = new_plane_state->crtc; 9007 struct drm_crtc_state *new_crtc_state; 9008 struct drm_framebuffer *fb = new_plane_state->fb; 9009 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb; 9010 bool plane_needs_flip; 9011 struct dc_plane_state *dc_plane; 9012 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 9013 9014 /* Cursor plane is handled after stream updates */ 9015 if (plane->type == DRM_PLANE_TYPE_CURSOR && 9016 acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { 9017 if ((fb && crtc == pcrtc) || 9018 (old_plane_state->fb && old_plane_state->crtc == pcrtc)) { 9019 cursor_update = true; 9020 if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) != 0) 9021 amdgpu_dm_update_cursor(plane, old_plane_state, &bundle->stream_update); 9022 } 9023 9024 continue; 9025 } 9026 9027 if (!fb || !crtc || pcrtc != crtc) 9028 continue; 9029 9030 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 9031 if (!new_crtc_state->active) 9032 continue; 9033 9034 dc_plane = dm_new_plane_state->dc_state; 9035 if (!dc_plane) 9036 continue; 9037 9038 bundle->surface_updates[planes_count].surface = dc_plane; 9039 if (new_pcrtc_state->color_mgmt_changed) { 9040 bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction; 9041 bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func; 9042 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; 9043 bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult; 9044 bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func; 9045 bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func; 9046 bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf; 9047 } 9048 9049 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, 9050 &bundle->scaling_infos[planes_count]); 9051 9052 bundle->surface_updates[planes_count].scaling_info = 9053 &bundle->scaling_infos[planes_count]; 9054 9055 plane_needs_flip = old_plane_state->fb && new_plane_state->fb; 9056 9057 pflip_present = pflip_present || plane_needs_flip; 9058 9059 if (!plane_needs_flip) { 9060 planes_count += 1; 9061 continue; 9062 } 9063 9064 fill_dc_plane_info_and_addr( 9065 dm->adev, new_plane_state, 9066 afb->tiling_flags, 9067 &bundle->plane_infos[planes_count], 9068 &bundle->flip_addrs[planes_count].address, 9069 afb->tmz_surface, false); 9070 9071 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n", 9072 new_plane_state->plane->index, 9073 bundle->plane_infos[planes_count].dcc.enable); 9074 9075 bundle->surface_updates[planes_count].plane_info = 9076 &bundle->plane_infos[planes_count]; 9077 9078 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled || 9079 
		    acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
			fill_dc_dirty_rects(plane, old_plane_state,
					    new_plane_state, new_crtc_state,
					    &bundle->flip_addrs[planes_count],
					    acrtc_state->stream->link->psr_settings.psr_version ==
					    DC_PSR_VERSION_SU_1,
					    &dirty_rects_changed);

			/*
			 * If the dirty regions changed, PSR-SU needs to be disabled
			 * temporarily and re-enabled once the dirty regions are stable,
			 * to avoid video glitches.
			 * PSR-SU will be re-enabled in vblank_control_worker() if the
			 * user pauses the video while PSR-SU is disabled.
			 */
			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
			    acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
			    dirty_rects_changed) {
				mutex_lock(&dm->dc_lock);
				acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
					timestamp_ns;
				if (acrtc_state->stream->link->psr_settings.psr_allow_active)
					amdgpu_dm_psr_disable(acrtc_state->stream);
				mutex_unlock(&dm->dc_lock);
			}
		}

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change memory domain, FB pitch, DCC state, rotation or
		 * mirroring.
		 *
		 * dm_crtc_helper_atomic_check() only accepts async flips with
		 * fast updates.
		 */
		if (crtc->state->async_flip &&
		    (acrtc_state->update_type != UPDATE_TYPE_FAST ||
		     get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
			drm_warn_once(state->dev,
				      "[PLANE:%d:%s] async flip with non-fast update\n",
				      plane->base.id, plane->name);

		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST &&
			get_mem_type(old_plane_state->fb) == get_mem_type(fb);

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
			      __func__,
			      bundle->flip_addrs[planes_count].address.grph.addr.high_part,
			      bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;

	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
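			 *
			 * Concretely, target_vblank = last_flip_vblank +
			 * wait_for_vblank, and the poll loop further below
			 * sleeps in roughly 1 ms steps while scanout is still
			 * inside a vblank period that precedes the target
			 * vblank count.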
9163 */ 9164 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); 9165 } else { 9166 /* For variable refresh rate mode only: 9167 * Get vblank of last completed flip to avoid > 1 vrr 9168 * flips per video frame by use of throttling, but allow 9169 * flip programming anywhere in the possibly large 9170 * variable vrr vblank interval for fine-grained flip 9171 * timing control and more opportunity to avoid stutter 9172 * on late submission of flips. 9173 */ 9174 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9175 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; 9176 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9177 } 9178 9179 target_vblank = last_flip_vblank + wait_for_vblank; 9180 9181 /* 9182 * Wait until we're out of the vertical blank period before the one 9183 * targeted by the flip 9184 */ 9185 while ((acrtc_attach->enabled && 9186 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 9187 0, &vpos, &hpos, NULL, 9188 NULL, &pcrtc->hwmode) 9189 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 9190 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 9191 (int)(target_vblank - 9192 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { 9193 usleep_range(1000, 1100); 9194 } 9195 9196 /** 9197 * Prepare the flip event for the pageflip interrupt to handle. 9198 * 9199 * This only works in the case where we've already turned on the 9200 * appropriate hardware blocks (eg. HUBP) so in the transition case 9201 * from 0 -> n planes we have to skip a hardware generated event 9202 * and rely on sending it from software. 9203 */ 9204 if (acrtc_attach->base.state->event && 9205 acrtc_state->active_planes > 0) { 9206 drm_crtc_vblank_get(pcrtc); 9207 9208 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9209 9210 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); 9211 prepare_flip_isr(acrtc_attach); 9212 9213 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9214 } 9215 9216 if (acrtc_state->stream) { 9217 if (acrtc_state->freesync_vrr_info_changed) 9218 bundle->stream_update.vrr_infopacket = 9219 &acrtc_state->stream->vrr_infopacket; 9220 } 9221 } else if (cursor_update && acrtc_state->active_planes > 0) { 9222 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9223 if (acrtc_attach->base.state->event) { 9224 drm_crtc_vblank_get(pcrtc); 9225 acrtc_attach->event = acrtc_attach->base.state->event; 9226 acrtc_attach->base.state->event = NULL; 9227 } 9228 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9229 } 9230 9231 /* Update the planes if changed or disable if we don't have any. */ 9232 if ((planes_count || acrtc_state->active_planes == 0) && 9233 acrtc_state->stream) { 9234 /* 9235 * If PSR or idle optimizations are enabled then flush out 9236 * any pending work before hardware programming. 9237 */ 9238 if (dm->vblank_control_workqueue) 9239 flush_workqueue(dm->vblank_control_workqueue); 9240 9241 bundle->stream_update.stream = acrtc_state->stream; 9242 if (new_pcrtc_state->mode_changed) { 9243 bundle->stream_update.src = acrtc_state->stream->src; 9244 bundle->stream_update.dst = acrtc_state->stream->dst; 9245 } 9246 9247 if (new_pcrtc_state->color_mgmt_changed) { 9248 /* 9249 * TODO: This isn't fully correct since we've actually 9250 * already modified the stream in place. 
9251 */ 9252 bundle->stream_update.gamut_remap = 9253 &acrtc_state->stream->gamut_remap_matrix; 9254 bundle->stream_update.output_csc_transform = 9255 &acrtc_state->stream->csc_color_matrix; 9256 bundle->stream_update.out_transfer_func = 9257 &acrtc_state->stream->out_transfer_func; 9258 bundle->stream_update.lut3d_func = 9259 (struct dc_3dlut *) acrtc_state->stream->lut3d_func; 9260 bundle->stream_update.func_shaper = 9261 (struct dc_transfer_func *) acrtc_state->stream->func_shaper; 9262 } 9263 9264 acrtc_state->stream->abm_level = acrtc_state->abm_level; 9265 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) 9266 bundle->stream_update.abm_level = &acrtc_state->abm_level; 9267 9268 mutex_lock(&dm->dc_lock); 9269 if (acrtc_state->update_type > UPDATE_TYPE_FAST) { 9270 if (acrtc_state->stream->link->replay_settings.replay_allow_active) 9271 amdgpu_dm_replay_disable(acrtc_state->stream); 9272 if (acrtc_state->stream->link->psr_settings.psr_allow_active) 9273 amdgpu_dm_psr_disable(acrtc_state->stream); 9274 } 9275 mutex_unlock(&dm->dc_lock); 9276 9277 /* 9278 * If FreeSync state on the stream has changed then we need to 9279 * re-adjust the min/max bounds now that DC doesn't handle this 9280 * as part of commit. 9281 */ 9282 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { 9283 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9284 dc_stream_adjust_vmin_vmax( 9285 dm->dc, acrtc_state->stream, 9286 &acrtc_attach->dm_irq_params.vrr_params.adjust); 9287 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9288 } 9289 mutex_lock(&dm->dc_lock); 9290 update_planes_and_stream_adapter(dm->dc, 9291 acrtc_state->update_type, 9292 planes_count, 9293 acrtc_state->stream, 9294 &bundle->stream_update, 9295 bundle->surface_updates); 9296 updated_planes_and_streams = true; 9297 9298 /** 9299 * Enable or disable the interrupts on the backend. 9300 * 9301 * Most pipes are put into power gating when unused. 9302 * 9303 * When power gating is enabled on a pipe we lose the 9304 * interrupt enablement state when power gating is disabled. 9305 * 9306 * So we need to update the IRQ control state in hardware 9307 * whenever the pipe turns on (since it could be previously 9308 * power gated) or off (since some pipes can't be power gated 9309 * on some ASICs). 9310 */ 9311 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) 9312 dm_update_pflip_irq_state(drm_to_adev(dev), 9313 acrtc_attach); 9314 9315 amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns); 9316 mutex_unlock(&dm->dc_lock); 9317 } 9318 9319 /* 9320 * Update cursor state *after* programming all the planes. 9321 * This avoids redundant programming in the case where we're going 9322 * to be disabling a single plane - those pipes are being disabled. 
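	 *
	 * The call below is skipped when the cursor was already programmed
	 * through amdgpu_dm_update_cursor() as part of the stream update
	 * (updated_planes_and_streams, on ASICs with a DCE_HWIP block), and
	 * in overlay cursor mode, where the cursor is handled as a regular
	 * plane.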
9323 */ 9324 if (acrtc_state->active_planes && 9325 (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) && 9326 acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) 9327 amdgpu_dm_commit_cursors(state); 9328 9329 cleanup: 9330 kfree(bundle); 9331 } 9332 9333 static void amdgpu_dm_commit_audio(struct drm_device *dev, 9334 struct drm_atomic_state *state) 9335 { 9336 struct amdgpu_device *adev = drm_to_adev(dev); 9337 struct amdgpu_dm_connector *aconnector; 9338 struct drm_connector *connector; 9339 struct drm_connector_state *old_con_state, *new_con_state; 9340 struct drm_crtc_state *new_crtc_state; 9341 struct dm_crtc_state *new_dm_crtc_state; 9342 const struct dc_stream_status *status; 9343 int i, inst; 9344 9345 /* Notify device removals. */ 9346 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9347 if (old_con_state->crtc != new_con_state->crtc) { 9348 /* CRTC changes require notification. */ 9349 goto notify; 9350 } 9351 9352 if (!new_con_state->crtc) 9353 continue; 9354 9355 new_crtc_state = drm_atomic_get_new_crtc_state( 9356 state, new_con_state->crtc); 9357 9358 if (!new_crtc_state) 9359 continue; 9360 9361 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9362 continue; 9363 9364 notify: 9365 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 9366 continue; 9367 9368 aconnector = to_amdgpu_dm_connector(connector); 9369 9370 mutex_lock(&adev->dm.audio_lock); 9371 inst = aconnector->audio_inst; 9372 aconnector->audio_inst = -1; 9373 mutex_unlock(&adev->dm.audio_lock); 9374 9375 amdgpu_dm_audio_eld_notify(adev, inst); 9376 } 9377 9378 /* Notify audio device additions. */ 9379 for_each_new_connector_in_state(state, connector, new_con_state, i) { 9380 if (!new_con_state->crtc) 9381 continue; 9382 9383 new_crtc_state = drm_atomic_get_new_crtc_state( 9384 state, new_con_state->crtc); 9385 9386 if (!new_crtc_state) 9387 continue; 9388 9389 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9390 continue; 9391 9392 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 9393 if (!new_dm_crtc_state->stream) 9394 continue; 9395 9396 status = dc_stream_get_status(new_dm_crtc_state->stream); 9397 if (!status) 9398 continue; 9399 9400 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 9401 continue; 9402 9403 aconnector = to_amdgpu_dm_connector(connector); 9404 9405 mutex_lock(&adev->dm.audio_lock); 9406 inst = status->audio_inst; 9407 aconnector->audio_inst = inst; 9408 mutex_unlock(&adev->dm.audio_lock); 9409 9410 amdgpu_dm_audio_eld_notify(adev, inst); 9411 } 9412 } 9413 9414 /* 9415 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC 9416 * @crtc_state: the DRM CRTC state 9417 * @stream_state: the DC stream state. 9418 * 9419 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring 9420 * a dc_stream_state's flags in sync with a drm_crtc_state's flags. 
9421 */ 9422 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, 9423 struct dc_stream_state *stream_state) 9424 { 9425 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); 9426 } 9427 9428 static void dm_clear_writeback(struct amdgpu_display_manager *dm, 9429 struct dm_crtc_state *crtc_state) 9430 { 9431 dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0); 9432 } 9433 9434 static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, 9435 struct dc_state *dc_state) 9436 { 9437 struct drm_device *dev = state->dev; 9438 struct amdgpu_device *adev = drm_to_adev(dev); 9439 struct amdgpu_display_manager *dm = &adev->dm; 9440 struct drm_crtc *crtc; 9441 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 9442 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 9443 struct drm_connector_state *old_con_state; 9444 struct drm_connector *connector; 9445 bool mode_set_reset_required = false; 9446 u32 i; 9447 struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; 9448 bool set_backlight_level = false; 9449 9450 /* Disable writeback */ 9451 for_each_old_connector_in_state(state, connector, old_con_state, i) { 9452 struct dm_connector_state *dm_old_con_state; 9453 struct amdgpu_crtc *acrtc; 9454 9455 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 9456 continue; 9457 9458 old_crtc_state = NULL; 9459 9460 dm_old_con_state = to_dm_connector_state(old_con_state); 9461 if (!dm_old_con_state->base.crtc) 9462 continue; 9463 9464 acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc); 9465 if (acrtc) 9466 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9467 9468 if (!acrtc || !acrtc->wb_enabled) 9469 continue; 9470 9471 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9472 9473 dm_clear_writeback(dm, dm_old_crtc_state); 9474 acrtc->wb_enabled = false; 9475 } 9476 9477 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 9478 new_crtc_state, i) { 9479 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9480 9481 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9482 9483 if (old_crtc_state->active && 9484 (!new_crtc_state->active || 9485 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 9486 manage_dm_interrupts(adev, acrtc, NULL); 9487 dc_stream_release(dm_old_crtc_state->stream); 9488 } 9489 } 9490 9491 drm_atomic_helper_calc_timestamping_constants(state); 9492 9493 /* update changed items */ 9494 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 9495 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9496 9497 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9498 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9499 9500 drm_dbg_state(state->dev, 9501 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 9502 acrtc->crtc_id, 9503 new_crtc_state->enable, 9504 new_crtc_state->active, 9505 new_crtc_state->planes_changed, 9506 new_crtc_state->mode_changed, 9507 new_crtc_state->active_changed, 9508 new_crtc_state->connectors_changed); 9509 9510 /* Disable cursor if disabling crtc */ 9511 if (old_crtc_state->active && !new_crtc_state->active) { 9512 struct dc_cursor_position position; 9513 9514 memset(&position, 0, sizeof(position)); 9515 mutex_lock(&dm->dc_lock); 9516 dc_exit_ips_for_hw_access(dm->dc); 9517 dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position); 9518 mutex_unlock(&dm->dc_lock); 9519 } 9520 9521 /* Copy all 
transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			drm_dbg_atomic(dev,
				       "Atomic commit: SET crtc id %d: [%p]\n",
				       acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * This can happen because of issues with
				 * userspace notification delivery: userspace
				 * tries to set a mode on a display that is in
				 * fact disconnected, so dc_sink is NULL on the
				 * aconnector. We expect a mode reset to follow
				 * shortly.
				 *
				 * It can also happen when an unplug occurs
				 * while the resume sequence is still finishing.
				 *
				 * In either case we want to pretend we still
				 * have a sink, to keep the pipe running so that
				 * hw state stays consistent with the sw state.
				 */
				drm_dbg_atomic(dev,
					       "Failed to create new stream for crtc %d\n",
					       acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
			mode_set_reset_required = true;
			set_backlight_level = true;
		} else if (modereset_required(new_crtc_state)) {
			drm_dbg_atomic(dev,
				       "Atomic commit: RESET. crtc id %d:[%p]\n",
				       acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			mode_set_reset_required = true;
		}
	} /* for_each_crtc_in_state() */

	/* If there was a mode set or reset, disable eDP PSR and Replay */
	if (mode_set_reset_required) {
		if (dm->vblank_control_workqueue)
			flush_workqueue(dm->vblank_control_workqueue);

		amdgpu_dm_replay_disable_all(dm);
		amdgpu_dm_psr_disable_all(dm);
	}

	dm_enable_per_frame_crtc_master_sync(dc_state);
	mutex_lock(&dm->dc_lock);
	dc_exit_ips_for_hw_access(dm->dc);
	WARN_ON(!dc_commit_streams(dm->dc, &params));

	/* Allow idle optimization when vblank count is 0 for display off */
	if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev))
		dc_allow_idle_optimizations(dm->dc, true);
	mutex_unlock(&dm->dc_lock);

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
				dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_state_get_stream_status(dc_state,
								    dm_new_crtc_state->stream);
			if (!status)
				drm_err(dev,
					"got no status for stream %p on acrtc%p\n",
					dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}

	/* During boot up and resume, the DC layer resets the panel brightness
	 * to fix a flicker issue. This leaves dm->actual_brightness out of
	 * sync with the current panel brightness level (dm->brightness holds
	 * the correct level), so we restore the backlight from dm->brightness
	 * after the mode set.
	 */
	if (set_backlight_level) {
		for (i = 0; i < dm->num_of_edps; i++) {
			if (dm->backlight_dev[i])
				amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
		}
	}
}

static void dm_set_writeback(struct amdgpu_display_manager *dm,
			     struct dm_crtc_state *crtc_state,
			     struct drm_connector *connector,
			     struct drm_connector_state *new_con_state)
{
	struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc;
	struct dc_writeback_info *wb_info;
	struct pipe_ctx *pipe = NULL;
	struct amdgpu_framebuffer *afb;
	int i = 0;

	wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
	if (!wb_info) {
		DRM_ERROR("Failed to allocate wb_info\n");
		return;
	}

	acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
	if (!acrtc) {
		DRM_ERROR("no amdgpu_crtc found\n");
		kfree(wb_info);
		return;
	}

	afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
	if (!afb) {
		DRM_ERROR("No amdgpu_framebuffer found\n");
		kfree(wb_info);
		return;
	}

	for (i = 0; i < MAX_PIPES; i++) {
		if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
			pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	/* fill in wb_info */
	wb_info->wb_enabled = true;

	wb_info->dwb_pipe_inst = 0;
	wb_info->dwb_params.dwbscl_black_color = 0;
	wb_info->dwb_params.hdr_mult = 0x1F000;
	wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
	wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
	wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
	wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;

	/* width & height from crtc */
	wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
	wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
	wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
	wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;

	wb_info->dwb_params.cnv_params.crop_en = false;
	wb_info->dwb_params.stereo_params.stereo_enabled = false;

	wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; /* 10 bits */
	wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
	wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
	wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;

	wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;

	wb_info->dwb_params.capture_rate = dwb_capture_rate_0;

	wb_info->dwb_params.scaler_taps.h_taps = 4;
	wb_info->dwb_params.scaler_taps.v_taps = 4;
	wb_info->dwb_params.scaler_taps.h_taps_c = 2;
	wb_info->dwb_params.scaler_taps.v_taps_c = 2;
	wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;

	wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
	wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];

	for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
		wb_info->mcif_buf_params.luma_address[i] = afb->address;
		wb_info->mcif_buf_params.chroma_address[i] = 0;
	}

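	/*
	 * MCIF fetches the writeback buffer through VMID 1. For DCN 3.0+ a
	 * warmup region is also described so the buffer can be pre-warmed
	 * before the first capture; it is sized to one full destination
	 * frame, region_size = luma_pitch * dest_height bytes from the FB
	 * base address, mirroring the luma plane layout programmed above.
	 */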
wb_info->mcif_buf_params.p_vmid = 1; 9719 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) { 9720 wb_info->mcif_warmup_params.start_address.quad_part = afb->address; 9721 wb_info->mcif_warmup_params.region_size = 9722 wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height; 9723 } 9724 wb_info->mcif_warmup_params.p_vmid = 1; 9725 wb_info->writeback_source_plane = pipe->plane_state; 9726 9727 dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info); 9728 9729 acrtc->wb_pending = true; 9730 acrtc->wb_conn = wb_conn; 9731 drm_writeback_queue_job(wb_conn, new_con_state); 9732 } 9733 9734 /** 9735 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. 9736 * @state: The atomic state to commit 9737 * 9738 * This will tell DC to commit the constructed DC state from atomic_check, 9739 * programming the hardware. Any failures here implies a hardware failure, since 9740 * atomic check should have filtered anything non-kosher. 9741 */ 9742 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) 9743 { 9744 struct drm_device *dev = state->dev; 9745 struct amdgpu_device *adev = drm_to_adev(dev); 9746 struct amdgpu_display_manager *dm = &adev->dm; 9747 struct dm_atomic_state *dm_state; 9748 struct dc_state *dc_state = NULL; 9749 u32 i, j; 9750 struct drm_crtc *crtc; 9751 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 9752 unsigned long flags; 9753 bool wait_for_vblank = true; 9754 struct drm_connector *connector; 9755 struct drm_connector_state *old_con_state, *new_con_state; 9756 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 9757 int crtc_disable_count = 0; 9758 9759 trace_amdgpu_dm_atomic_commit_tail_begin(state); 9760 9761 drm_atomic_helper_update_legacy_modeset_state(dev, state); 9762 drm_dp_mst_atomic_wait_for_dependencies(state); 9763 9764 dm_state = dm_atomic_get_new_state(state); 9765 if (dm_state && dm_state->context) { 9766 dc_state = dm_state->context; 9767 amdgpu_dm_commit_streams(state, dc_state); 9768 } 9769 9770 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9771 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9772 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9773 struct amdgpu_dm_connector *aconnector; 9774 9775 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 9776 continue; 9777 9778 aconnector = to_amdgpu_dm_connector(connector); 9779 9780 if (!adev->dm.hdcp_workqueue) 9781 continue; 9782 9783 pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); 9784 9785 if (!connector) 9786 continue; 9787 9788 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 9789 connector->index, connector->status, connector->dpms); 9790 pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 9791 old_con_state->content_protection, new_con_state->content_protection); 9792 9793 if (aconnector->dc_sink) { 9794 if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && 9795 aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { 9796 pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", 9797 aconnector->dc_sink->edid_caps.display_name); 9798 } 9799 } 9800 9801 new_crtc_state = NULL; 9802 old_crtc_state = NULL; 9803 9804 if (acrtc) { 9805 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 9806 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9807 } 9808 9809 if (old_crtc_state) 9810 pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 
				 old_crtc_state->enable,
				 old_crtc_state->active,
				 old_crtc_state->mode_changed,
				 old_crtc_state->active_changed,
				 old_crtc_state->connectors_changed);

		if (new_crtc_state)
			pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
				 new_crtc_state->enable,
				 new_crtc_state->active,
				 new_crtc_state->mode_changed,
				 new_crtc_state->active_changed,
				 new_crtc_state->connectors_changed);
	}

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		if (!adev->dm.hdcp_workqueue)
			continue;

		new_crtc_state = NULL;
		old_crtc_state = NULL;

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
						    old_con_state, connector, adev->dm.hdcp_workqueue)) {
			/* When a display is unplugged from an MST hub, the
			 * connector is destroyed in dm_dp_mst_connector_destroy
			 * and its HDCP properties (type, undesired, desired,
			 * enabled) would be lost. So save the HDCP properties
			 * into hdcp_work within amdgpu_dm_atomic_commit_tail;
if the same display is 9859 * plugged back with same display index, its hdcp properties 9860 * will be retrieved from hdcp_work within dm_dp_mst_get_modes 9861 */ 9862 9863 bool enable_encryption = false; 9864 9865 if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) 9866 enable_encryption = true; 9867 9868 if (aconnector->dc_link && aconnector->dc_sink && 9869 aconnector->dc_link->type == dc_connection_mst_branch) { 9870 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; 9871 struct hdcp_workqueue *hdcp_w = 9872 &hdcp_work[aconnector->dc_link->link_index]; 9873 9874 hdcp_w->hdcp_content_type[connector->index] = 9875 new_con_state->hdcp_content_type; 9876 hdcp_w->content_protection[connector->index] = 9877 new_con_state->content_protection; 9878 } 9879 9880 if (new_crtc_state && new_crtc_state->mode_changed && 9881 new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) 9882 enable_encryption = true; 9883 9884 DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); 9885 9886 if (aconnector->dc_link) 9887 hdcp_update_display( 9888 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, 9889 new_con_state->hdcp_content_type, enable_encryption); 9890 } 9891 } 9892 9893 /* Handle connector state changes */ 9894 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9895 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9896 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 9897 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9898 struct dc_surface_update *dummy_updates; 9899 struct dc_stream_update stream_update; 9900 struct dc_info_packet hdr_packet; 9901 struct dc_stream_status *status = NULL; 9902 bool abm_changed, hdr_changed, scaling_changed; 9903 9904 memset(&stream_update, 0, sizeof(stream_update)); 9905 9906 if (acrtc) { 9907 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 9908 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9909 } 9910 9911 /* Skip any modesets/resets */ 9912 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) 9913 continue; 9914 9915 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9916 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9917 9918 scaling_changed = is_scaling_state_different(dm_new_con_state, 9919 dm_old_con_state); 9920 9921 abm_changed = dm_new_crtc_state->abm_level != 9922 dm_old_crtc_state->abm_level; 9923 9924 hdr_changed = 9925 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state); 9926 9927 if (!scaling_changed && !abm_changed && !hdr_changed) 9928 continue; 9929 9930 stream_update.stream = dm_new_crtc_state->stream; 9931 if (scaling_changed) { 9932 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, 9933 dm_new_con_state, dm_new_crtc_state->stream); 9934 9935 stream_update.src = dm_new_crtc_state->stream->src; 9936 stream_update.dst = dm_new_crtc_state->stream->dst; 9937 } 9938 9939 if (abm_changed) { 9940 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; 9941 9942 stream_update.abm_level = &dm_new_crtc_state->abm_level; 9943 } 9944 9945 if (hdr_changed) { 9946 fill_hdr_info_packet(new_con_state, &hdr_packet); 9947 stream_update.hdr_static_metadata = &hdr_packet; 9948 } 9949 9950 status = dc_stream_get_status(dm_new_crtc_state->stream); 9951 9952 if (WARN_ON(!status)) 9953 continue; 9954 9955 
WARN_ON(!status->plane_count); 9956 9957 /* 9958 * TODO: DC refuses to perform stream updates without a dc_surface_update. 9959 * Here we create an empty update on each plane. 9960 * To fix this, DC should permit updating only stream properties. 9961 */ 9962 dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); 9963 if (!dummy_updates) { 9964 DRM_ERROR("Failed to allocate memory for dummy_updates.\n"); 9965 continue; 9966 } 9967 for (j = 0; j < status->plane_count; j++) 9968 dummy_updates[j].surface = status->plane_states[0]; 9969 9970 sort(dummy_updates, status->plane_count, 9971 sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL); 9972 9973 mutex_lock(&dm->dc_lock); 9974 dc_exit_ips_for_hw_access(dm->dc); 9975 dc_update_planes_and_stream(dm->dc, 9976 dummy_updates, 9977 status->plane_count, 9978 dm_new_crtc_state->stream, 9979 &stream_update); 9980 mutex_unlock(&dm->dc_lock); 9981 kfree(dummy_updates); 9982 } 9983 9984 /** 9985 * Enable interrupts for CRTCs that are newly enabled or went through 9986 * a modeset. It was intentionally deferred until after the front end 9987 * state was modified to wait until the OTG was on and so the IRQ 9988 * handlers didn't access stale or invalid state. 9989 */ 9990 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 9991 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9992 #ifdef CONFIG_DEBUG_FS 9993 enum amdgpu_dm_pipe_crc_source cur_crc_src; 9994 #endif 9995 /* Count number of newly disabled CRTCs for dropping PM refs later. */ 9996 if (old_crtc_state->active && !new_crtc_state->active) 9997 crtc_disable_count++; 9998 9999 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10000 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10001 10002 /* For freesync config update on crtc state and params for irq */ 10003 update_stream_irq_parameters(dm, dm_new_crtc_state); 10004 10005 #ifdef CONFIG_DEBUG_FS 10006 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10007 cur_crc_src = acrtc->dm_irq_params.crc_src; 10008 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10009 #endif 10010 10011 if (new_crtc_state->active && 10012 (!old_crtc_state->active || 10013 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 10014 dc_stream_retain(dm_new_crtc_state->stream); 10015 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; 10016 manage_dm_interrupts(adev, acrtc, dm_new_crtc_state); 10017 } 10018 /* Handle vrr on->off / off->on transitions */ 10019 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); 10020 10021 #ifdef CONFIG_DEBUG_FS 10022 if (new_crtc_state->active && 10023 (!old_crtc_state->active || 10024 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 10025 /** 10026 * Frontend may have changed so reapply the CRC capture 10027 * settings for the stream. 10028 */ 10029 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { 10030 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 10031 if (amdgpu_dm_crc_window_is_activated(crtc)) { 10032 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10033 acrtc->dm_irq_params.window_param.update_win = true; 10034 10035 /** 10036 * It takes 2 frames for HW to stably generate CRC when 10037 * resuming from suspend, so we set skip_frame_cnt 2. 
10038 */ 10039 acrtc->dm_irq_params.window_param.skip_frame_cnt = 2; 10040 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10041 } 10042 #endif 10043 if (amdgpu_dm_crtc_configure_crc_source( 10044 crtc, dm_new_crtc_state, cur_crc_src)) 10045 drm_dbg_atomic(dev, "Failed to configure crc source"); 10046 } 10047 } 10048 #endif 10049 } 10050 10051 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) 10052 if (new_crtc_state->async_flip) 10053 wait_for_vblank = false; 10054 10055 /* update planes when needed per crtc*/ 10056 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { 10057 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10058 10059 if (dm_new_crtc_state->stream) 10060 amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); 10061 } 10062 10063 /* Enable writeback */ 10064 for_each_new_connector_in_state(state, connector, new_con_state, i) { 10065 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 10066 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 10067 10068 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 10069 continue; 10070 10071 if (!new_con_state->writeback_job) 10072 continue; 10073 10074 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 10075 10076 if (!new_crtc_state) 10077 continue; 10078 10079 if (acrtc->wb_enabled) 10080 continue; 10081 10082 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10083 10084 dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state); 10085 acrtc->wb_enabled = true; 10086 } 10087 10088 /* Update audio instances for each connector. */ 10089 amdgpu_dm_commit_audio(dev, state); 10090 10091 /* restore the backlight level */ 10092 for (i = 0; i < dm->num_of_edps; i++) { 10093 if (dm->backlight_dev[i] && 10094 (dm->actual_brightness[i] != dm->brightness[i])) 10095 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 10096 } 10097 10098 /* 10099 * send vblank event on all events not handled in flip and 10100 * mark consumed event for drm_atomic_helper_commit_hw_done 10101 */ 10102 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10103 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 10104 10105 if (new_crtc_state->event) 10106 drm_send_event_locked(dev, &new_crtc_state->event->base); 10107 10108 new_crtc_state->event = NULL; 10109 } 10110 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10111 10112 /* Signal HW programming completion */ 10113 drm_atomic_helper_commit_hw_done(state); 10114 10115 if (wait_for_vblank) 10116 drm_atomic_helper_wait_for_flip_done(dev, state); 10117 10118 drm_atomic_helper_cleanup_planes(dev, state); 10119 10120 /* Don't free the memory if we are hitting this as part of suspend. 10121 * This way we don't free any memory during suspend; see 10122 * amdgpu_bo_free_kernel(). The memory will be freed in the first 10123 * non-suspend modeset or when the driver is torn down. 
10124 */ 10125 if (!adev->in_suspend) { 10126 /* return the stolen vga memory back to VRAM */ 10127 if (!adev->mman.keep_stolen_vga_memory) 10128 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); 10129 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); 10130 } 10131 10132 /* 10133 * Finally, drop a runtime PM reference for each newly disabled CRTC, 10134 * so we can put the GPU into runtime suspend if we're not driving any 10135 * displays anymore 10136 */ 10137 for (i = 0; i < crtc_disable_count; i++) 10138 pm_runtime_put_autosuspend(dev->dev); 10139 pm_runtime_mark_last_busy(dev->dev); 10140 10141 trace_amdgpu_dm_atomic_commit_tail_finish(state); 10142 } 10143 10144 static int dm_force_atomic_commit(struct drm_connector *connector) 10145 { 10146 int ret = 0; 10147 struct drm_device *ddev = connector->dev; 10148 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); 10149 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 10150 struct drm_plane *plane = disconnected_acrtc->base.primary; 10151 struct drm_connector_state *conn_state; 10152 struct drm_crtc_state *crtc_state; 10153 struct drm_plane_state *plane_state; 10154 10155 if (!state) 10156 return -ENOMEM; 10157 10158 state->acquire_ctx = ddev->mode_config.acquire_ctx; 10159 10160 /* Construct an atomic state to restore previous display setting */ 10161 10162 /* 10163 * Attach connectors to drm_atomic_state 10164 */ 10165 conn_state = drm_atomic_get_connector_state(state, connector); 10166 10167 ret = PTR_ERR_OR_ZERO(conn_state); 10168 if (ret) 10169 goto out; 10170 10171 /* Attach crtc to drm_atomic_state*/ 10172 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); 10173 10174 ret = PTR_ERR_OR_ZERO(crtc_state); 10175 if (ret) 10176 goto out; 10177 10178 /* force a restore */ 10179 crtc_state->mode_changed = true; 10180 10181 /* Attach plane to drm_atomic_state */ 10182 plane_state = drm_atomic_get_plane_state(state, plane); 10183 10184 ret = PTR_ERR_OR_ZERO(plane_state); 10185 if (ret) 10186 goto out; 10187 10188 /* Call commit internally with the state we just constructed */ 10189 ret = drm_atomic_commit(state); 10190 10191 out: 10192 drm_atomic_state_put(state); 10193 if (ret) 10194 DRM_ERROR("Restoring old state failed with %i\n", ret); 10195 10196 return ret; 10197 } 10198 10199 /* 10200 * This function handles all cases when set mode does not come upon hotplug. 
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
		return;

	aconnector = to_amdgpu_dm_connector(connector);

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce we are in a state where we cannot rely on
	 * usermode calls to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * then waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases them the
	 * extra locks we are locking here will get released too.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming is completed and
		 * page flips are done.
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
				&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ?
ret : 0; 10285 } 10286 10287 static void get_freesync_config_for_crtc( 10288 struct dm_crtc_state *new_crtc_state, 10289 struct dm_connector_state *new_con_state) 10290 { 10291 struct mod_freesync_config config = {0}; 10292 struct amdgpu_dm_connector *aconnector; 10293 struct drm_display_mode *mode = &new_crtc_state->base.mode; 10294 int vrefresh = drm_mode_vrefresh(mode); 10295 bool fs_vid_mode = false; 10296 10297 if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 10298 return; 10299 10300 aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); 10301 10302 new_crtc_state->vrr_supported = new_con_state->freesync_capable && 10303 vrefresh >= aconnector->min_vfreq && 10304 vrefresh <= aconnector->max_vfreq; 10305 10306 if (new_crtc_state->vrr_supported) { 10307 new_crtc_state->stream->ignore_msa_timing_param = true; 10308 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; 10309 10310 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000; 10311 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; 10312 config.vsif_supported = true; 10313 config.btr = true; 10314 10315 if (fs_vid_mode) { 10316 config.state = VRR_STATE_ACTIVE_FIXED; 10317 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz; 10318 goto out; 10319 } else if (new_crtc_state->base.vrr_enabled) { 10320 config.state = VRR_STATE_ACTIVE_VARIABLE; 10321 } else { 10322 config.state = VRR_STATE_INACTIVE; 10323 } 10324 } 10325 out: 10326 new_crtc_state->freesync_config = config; 10327 } 10328 10329 static void reset_freesync_config_for_crtc( 10330 struct dm_crtc_state *new_crtc_state) 10331 { 10332 new_crtc_state->vrr_supported = false; 10333 10334 memset(&new_crtc_state->vrr_infopacket, 0, 10335 sizeof(new_crtc_state->vrr_infopacket)); 10336 } 10337 10338 static bool 10339 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, 10340 struct drm_crtc_state *new_crtc_state) 10341 { 10342 const struct drm_display_mode *old_mode, *new_mode; 10343 10344 if (!old_crtc_state || !new_crtc_state) 10345 return false; 10346 10347 old_mode = &old_crtc_state->mode; 10348 new_mode = &new_crtc_state->mode; 10349 10350 if (old_mode->clock == new_mode->clock && 10351 old_mode->hdisplay == new_mode->hdisplay && 10352 old_mode->vdisplay == new_mode->vdisplay && 10353 old_mode->htotal == new_mode->htotal && 10354 old_mode->vtotal != new_mode->vtotal && 10355 old_mode->hsync_start == new_mode->hsync_start && 10356 old_mode->vsync_start != new_mode->vsync_start && 10357 old_mode->hsync_end == new_mode->hsync_end && 10358 old_mode->vsync_end != new_mode->vsync_end && 10359 old_mode->hskew == new_mode->hskew && 10360 old_mode->vscan == new_mode->vscan && 10361 (old_mode->vsync_end - old_mode->vsync_start) == 10362 (new_mode->vsync_end - new_mode->vsync_start)) 10363 return true; 10364 10365 return false; 10366 } 10367 10368 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) 10369 { 10370 u64 num, den, res; 10371 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; 10372 10373 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; 10374 10375 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000; 10376 den = (unsigned long long)new_crtc_state->mode.htotal * 10377 (unsigned long long)new_crtc_state->mode.vtotal; 10378 10379 res = div_u64(num, den); 10380 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res; 10381 } 10382 10383 static int 
dm_update_crtc_state(struct amdgpu_display_manager *dm, 10384 struct drm_atomic_state *state, 10385 struct drm_crtc *crtc, 10386 struct drm_crtc_state *old_crtc_state, 10387 struct drm_crtc_state *new_crtc_state, 10388 bool enable, 10389 bool *lock_and_validation_needed) 10390 { 10391 struct dm_atomic_state *dm_state = NULL; 10392 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 10393 struct dc_stream_state *new_stream; 10394 int ret = 0; 10395 10396 /* 10397 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set 10398 * update changed items 10399 */ 10400 struct amdgpu_crtc *acrtc = NULL; 10401 struct drm_connector *connector = NULL; 10402 struct amdgpu_dm_connector *aconnector = NULL; 10403 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; 10404 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; 10405 10406 new_stream = NULL; 10407 10408 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10409 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10410 acrtc = to_amdgpu_crtc(crtc); 10411 connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 10412 if (connector) 10413 aconnector = to_amdgpu_dm_connector(connector); 10414 10415 /* TODO This hack should go away */ 10416 if (connector && enable) { 10417 /* Make sure fake sink is created in plug-in scenario */ 10418 drm_new_conn_state = drm_atomic_get_new_connector_state(state, 10419 connector); 10420 drm_old_conn_state = drm_atomic_get_old_connector_state(state, 10421 connector); 10422 10423 if (IS_ERR(drm_new_conn_state)) { 10424 ret = PTR_ERR_OR_ZERO(drm_new_conn_state); 10425 goto fail; 10426 } 10427 10428 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); 10429 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); 10430 10431 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10432 goto skip_modeset; 10433 10434 new_stream = create_validate_stream_for_sink(aconnector, 10435 &new_crtc_state->mode, 10436 dm_new_conn_state, 10437 dm_old_crtc_state->stream); 10438 10439 /* 10440 * we can have no stream on ACTION_SET if a display 10441 * was disconnected during S3, in this case it is not an 10442 * error, the OS will be updated after detection, and 10443 * will do the right thing on next atomic commit 10444 */ 10445 10446 if (!new_stream) { 10447 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 10448 __func__, acrtc->base.base.id); 10449 ret = -ENOMEM; 10450 goto fail; 10451 } 10452 10453 /* 10454 * TODO: Check VSDB bits to decide whether this should 10455 * be enabled or not. 10456 */ 10457 new_stream->triggered_crtc_reset.enabled = 10458 dm->force_timing_sync; 10459 10460 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 10461 10462 ret = fill_hdr_info_packet(drm_new_conn_state, 10463 &new_stream->hdr_static_metadata); 10464 if (ret) 10465 goto fail; 10466 10467 /* 10468 * If we already removed the old stream from the context 10469 * (and set the new stream to NULL) then we can't reuse 10470 * the old stream even if the stream and scaling are unchanged. 10471 * We'll hit the BUG_ON and black screen. 10472 * 10473 * TODO: Refactor this function to allow this check to work 10474 * in all conditions. 
10475 */ 10476 if (amdgpu_freesync_vid_mode && 10477 dm_new_crtc_state->stream && 10478 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) 10479 goto skip_modeset; 10480 10481 if (dm_new_crtc_state->stream && 10482 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 10483 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 10484 new_crtc_state->mode_changed = false; 10485 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", 10486 new_crtc_state->mode_changed); 10487 } 10488 } 10489 10490 /* mode_changed flag may get updated above, need to check again */ 10491 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10492 goto skip_modeset; 10493 10494 drm_dbg_state(state->dev, 10495 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 10496 acrtc->crtc_id, 10497 new_crtc_state->enable, 10498 new_crtc_state->active, 10499 new_crtc_state->planes_changed, 10500 new_crtc_state->mode_changed, 10501 new_crtc_state->active_changed, 10502 new_crtc_state->connectors_changed); 10503 10504 /* Remove stream for any changed/disabled CRTC */ 10505 if (!enable) { 10506 10507 if (!dm_old_crtc_state->stream) 10508 goto skip_modeset; 10509 10510 /* Unset freesync video if it was active before */ 10511 if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { 10512 dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; 10513 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; 10514 } 10515 10516 /* Now check if we should set freesync video mode */ 10517 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && 10518 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 10519 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && 10520 is_timing_unchanged_for_freesync(new_crtc_state, 10521 old_crtc_state)) { 10522 new_crtc_state->mode_changed = false; 10523 DRM_DEBUG_DRIVER( 10524 "Mode change not required for front porch change, setting mode_changed to %d", 10525 new_crtc_state->mode_changed); 10526 10527 set_freesync_fixed_config(dm_new_crtc_state); 10528 10529 goto skip_modeset; 10530 } else if (amdgpu_freesync_vid_mode && aconnector && 10531 is_freesync_video_mode(&new_crtc_state->mode, 10532 aconnector)) { 10533 struct drm_display_mode *high_mode; 10534 10535 high_mode = get_highest_refresh_rate_mode(aconnector, false); 10536 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) 10537 set_freesync_fixed_config(dm_new_crtc_state); 10538 } 10539 10540 ret = dm_atomic_get_state(state, &dm_state); 10541 if (ret) 10542 goto fail; 10543 10544 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", 10545 crtc->base.id); 10546 10547 /* i.e. 
reset mode */ 10548 if (dc_state_remove_stream( 10549 dm->dc, 10550 dm_state->context, 10551 dm_old_crtc_state->stream) != DC_OK) { 10552 ret = -EINVAL; 10553 goto fail; 10554 } 10555 10556 dc_stream_release(dm_old_crtc_state->stream); 10557 dm_new_crtc_state->stream = NULL; 10558 10559 reset_freesync_config_for_crtc(dm_new_crtc_state); 10560 10561 *lock_and_validation_needed = true; 10562 10563 } else { /* Add stream for any updated/enabled CRTC */ 10564 /* 10565 * Quick fix to prevent a NULL pointer dereference on new_stream when 10566 * MST connectors added in chained mode are not found in the existing crtc_state. 10567 * TODO: need to dig out the root cause of this 10568 */ 10569 if (!connector) 10570 goto skip_modeset; 10571 10572 if (modereset_required(new_crtc_state)) 10573 goto skip_modeset; 10574 10575 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream, 10576 dm_old_crtc_state->stream)) { 10577 10578 WARN_ON(dm_new_crtc_state->stream); 10579 10580 ret = dm_atomic_get_state(state, &dm_state); 10581 if (ret) 10582 goto fail; 10583 10584 dm_new_crtc_state->stream = new_stream; 10585 10586 dc_stream_retain(new_stream); 10587 10588 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n", 10589 crtc->base.id); 10590 10591 if (dc_state_add_stream( 10592 dm->dc, 10593 dm_state->context, 10594 dm_new_crtc_state->stream) != DC_OK) { 10595 ret = -EINVAL; 10596 goto fail; 10597 } 10598 10599 *lock_and_validation_needed = true; 10600 } 10601 } 10602 10603 skip_modeset: 10604 /* Release the extra reference */ 10605 if (new_stream) 10606 dc_stream_release(new_stream); 10607 10608 /* 10609 * We want to do dc stream updates that do not require a 10610 * full modeset below. 10611 */ 10612 if (!(enable && connector && new_crtc_state->active)) 10613 return 0; 10614 /* 10615 * Given the above conditions, the dc stream cannot be NULL because: 10616 * 1. We're in the process of enabling the CRTC (the stream has just been 10617 * added to the dc context, or is already on the context), 10618 * 2. it has a valid connector attached, and 10619 * 3. it is currently active and enabled. 10620 * => The dc stream state currently exists. 10621 */ 10622 BUG_ON(dm_new_crtc_state->stream == NULL); 10623 10624 /* Scaling or underscan settings */ 10625 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) || 10626 drm_atomic_crtc_needs_modeset(new_crtc_state)) 10627 update_stream_scaling_settings( 10628 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream); 10629 10630 /* ABM settings */ 10631 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 10632 10633 /* 10634 * Color management settings. We also update color properties 10635 * when a modeset is needed, to ensure they get reprogrammed. 10636 */ 10637 if (dm_new_crtc_state->base.color_mgmt_changed || 10638 dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf || 10639 drm_atomic_crtc_needs_modeset(new_crtc_state)) { 10640 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); 10641 if (ret) 10642 goto fail; 10643 } 10644 10645 /* Update Freesync settings.
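They are derived from the connector state by get_freesync_config_for_crtc().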
*/ 10646 get_freesync_config_for_crtc(dm_new_crtc_state, 10647 dm_new_conn_state); 10648 10649 return ret; 10650 10651 fail: 10652 if (new_stream) 10653 dc_stream_release(new_stream); 10654 return ret; 10655 } 10656 10657 static bool should_reset_plane(struct drm_atomic_state *state, 10658 struct drm_plane *plane, 10659 struct drm_plane_state *old_plane_state, 10660 struct drm_plane_state *new_plane_state) 10661 { 10662 struct drm_plane *other; 10663 struct drm_plane_state *old_other_state, *new_other_state; 10664 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10665 struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state; 10666 struct amdgpu_device *adev = drm_to_adev(plane->dev); 10667 int i; 10668 10669 /* 10670 * TODO: Remove this hack for all ASICs once it is proven that 10671 * fast updates work fine on DCN3.2+. 10672 */ 10673 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) && 10674 state->allow_modeset) 10675 return true; 10676 10677 /* Exit early if we know that we're adding or removing the plane. */ 10678 if (old_plane_state->crtc != new_plane_state->crtc) 10679 return true; 10680 10681 /* old crtc == new_crtc == NULL, plane not in context. */ 10682 if (!new_plane_state->crtc) 10683 return false; 10684 10685 new_crtc_state = 10686 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); 10687 old_crtc_state = 10688 drm_atomic_get_old_crtc_state(state, old_plane_state->crtc); 10689 10690 if (!new_crtc_state) 10691 return true; 10692 10693 /* 10694 * A change in cursor mode means a new dc pipe needs to be acquired or 10695 * released from the state. 10696 */ 10697 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); 10698 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 10699 if (plane->type == DRM_PLANE_TYPE_CURSOR && 10700 old_dm_crtc_state != NULL && 10701 old_dm_crtc_state->cursor_mode != new_dm_crtc_state->cursor_mode) { 10702 return true; 10703 } 10704 10705 /* CRTC Degamma changes currently require us to recreate planes. */ 10706 if (new_crtc_state->color_mgmt_changed) 10707 return true; 10708 10709 /* 10710 * On zpos change, planes need to be reordered by removing and re-adding 10711 * them one by one to the dc state, in order of descending zpos. 10712 * 10713 * TODO: We can likely skip bandwidth validation if the only thing that 10714 * changed about the plane was its z-ordering. 10715 */ 10716 if (old_plane_state->normalized_zpos != new_plane_state->normalized_zpos) 10717 return true; 10718 10719 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) 10720 return true; 10721 10722 /* 10723 * If there are any new primary or overlay planes being added or 10724 * removed then the z-order can potentially change. To ensure 10725 * correct z-order and pipe acquisition the current DC architecture 10726 * requires us to remove and recreate all existing planes. 10727 * 10728 * TODO: Come up with a more elegant solution for this.
10729 */ 10730 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { 10731 struct amdgpu_framebuffer *old_afb, *new_afb; 10732 struct dm_plane_state *dm_new_other_state, *dm_old_other_state; 10733 10734 dm_new_other_state = to_dm_plane_state(new_other_state); 10735 dm_old_other_state = to_dm_plane_state(old_other_state); 10736 10737 if (other->type == DRM_PLANE_TYPE_CURSOR) 10738 continue; 10739 10740 if (old_other_state->crtc != new_plane_state->crtc && 10741 new_other_state->crtc != new_plane_state->crtc) 10742 continue; 10743 10744 if (old_other_state->crtc != new_other_state->crtc) 10745 return true; 10746 10747 /* Src/dst size and scaling updates. */ 10748 if (old_other_state->src_w != new_other_state->src_w || 10749 old_other_state->src_h != new_other_state->src_h || 10750 old_other_state->crtc_w != new_other_state->crtc_w || 10751 old_other_state->crtc_h != new_other_state->crtc_h) 10752 return true; 10753 10754 /* Rotation / mirroring updates. */ 10755 if (old_other_state->rotation != new_other_state->rotation) 10756 return true; 10757 10758 /* Blending updates. */ 10759 if (old_other_state->pixel_blend_mode != 10760 new_other_state->pixel_blend_mode) 10761 return true; 10762 10763 /* Alpha updates. */ 10764 if (old_other_state->alpha != new_other_state->alpha) 10765 return true; 10766 10767 /* Colorspace changes. */ 10768 if (old_other_state->color_range != new_other_state->color_range || 10769 old_other_state->color_encoding != new_other_state->color_encoding) 10770 return true; 10771 10772 /* HDR/Transfer Function changes. */ 10773 if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf || 10774 dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut || 10775 dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult || 10776 dm_old_other_state->ctm != dm_new_other_state->ctm || 10777 dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut || 10778 dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf || 10779 dm_old_other_state->lut3d != dm_new_other_state->lut3d || 10780 dm_old_other_state->blend_lut != dm_new_other_state->blend_lut || 10781 dm_old_other_state->blend_tf != dm_new_other_state->blend_tf) 10782 return true; 10783 10784 /* Framebuffer checks fall at the end. */ 10785 if (!old_other_state->fb || !new_other_state->fb) 10786 continue; 10787 10788 /* Pixel format changes can require bandwidth updates. */ 10789 if (old_other_state->fb->format != new_other_state->fb->format) 10790 return true; 10791 10792 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb; 10793 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb; 10794 10795 /* Tiling and DCC changes also require bandwidth updates. 
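Both alter how the surface is laid out and fetched from memory.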
*/ 10796 if (old_afb->tiling_flags != new_afb->tiling_flags || 10797 old_afb->base.modifier != new_afb->base.modifier) 10798 return true; 10799 } 10800 10801 return false; 10802 } 10803 10804 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, 10805 struct drm_plane_state *new_plane_state, 10806 struct drm_framebuffer *fb) 10807 { 10808 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev); 10809 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); 10810 unsigned int pitch; 10811 bool linear; 10812 10813 if (fb->width > new_acrtc->max_cursor_width || 10814 fb->height > new_acrtc->max_cursor_height) { 10815 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n", 10816 new_plane_state->fb->width, 10817 new_plane_state->fb->height); 10818 return -EINVAL; 10819 } 10820 if (new_plane_state->src_w != fb->width << 16 || 10821 new_plane_state->src_h != fb->height << 16) { 10822 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 10823 return -EINVAL; 10824 } 10825 10826 /* Pitch in pixels */ 10827 pitch = fb->pitches[0] / fb->format->cpp[0]; 10828 10829 if (fb->width != pitch) { 10830 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d", 10831 fb->width, pitch); 10832 return -EINVAL; 10833 } 10834 10835 switch (pitch) { 10836 case 64: 10837 case 128: 10838 case 256: 10839 /* FB pitch is supported by cursor plane */ 10840 break; 10841 default: 10842 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch); 10843 return -EINVAL; 10844 } 10845 10846 /* Core DRM takes care of checking FB modifiers, so we only need to 10847 * check tiling flags when the FB doesn't have a modifier. 10848 */ 10849 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { 10850 if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) { 10851 linear = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE) == 0; 10852 } else if (adev->family >= AMDGPU_FAMILY_AI) { 10853 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; 10854 } else { 10855 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && 10856 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && 10857 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; 10858 } 10859 if (!linear) { 10860 DRM_DEBUG_ATOMIC("Cursor FB not linear"); 10861 return -EINVAL; 10862 } 10863 } 10864 10865 return 0; 10866 } 10867 10868 /* 10869 * Helper function for checking the cursor in native mode 10870 */ 10871 static int dm_check_native_cursor_state(struct drm_crtc *new_plane_crtc, 10872 struct drm_plane *plane, 10873 struct drm_plane_state *new_plane_state, 10874 bool enable) 10875 { 10876 10877 struct amdgpu_crtc *new_acrtc; 10878 int ret; 10879 10880 if (!enable || !new_plane_crtc || 10881 drm_atomic_plane_disabling(plane->state, new_plane_state)) 10882 return 0; 10883 10884 new_acrtc = to_amdgpu_crtc(new_plane_crtc); 10885 10886 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { 10887 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 10888 return -EINVAL; 10889 } 10890 10891 if (new_plane_state->fb) { 10892 ret = dm_check_cursor_fb(new_acrtc, new_plane_state, 10893 new_plane_state->fb); 10894 if (ret) 10895 return ret; 10896 } 10897 10898 return 0; 10899 } 10900 10901 static bool dm_should_update_native_cursor(struct drm_atomic_state *state, 10902 struct drm_crtc *old_plane_crtc, 10903 struct drm_crtc *new_plane_crtc, 10904 bool enable) 10905 { 10906 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10907 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 
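/* Report whether the cursor should go through the native cursor path: look at the CRTC the cursor plane is being disabled from (old) or enabled on (new) and check whether that CRTC handles its cursor in DM_CURSOR_NATIVE_MODE. */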
10908 10909 if (!enable) { 10910 if (old_plane_crtc == NULL) 10911 return true; 10912 10913 old_crtc_state = drm_atomic_get_old_crtc_state( 10914 state, old_plane_crtc); 10915 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10916 10917 return dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; 10918 } else { 10919 if (new_plane_crtc == NULL) 10920 return true; 10921 10922 new_crtc_state = drm_atomic_get_new_crtc_state( 10923 state, new_plane_crtc); 10924 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10925 10926 return dm_new_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; 10927 } 10928 } 10929 10930 static int dm_update_plane_state(struct dc *dc, 10931 struct drm_atomic_state *state, 10932 struct drm_plane *plane, 10933 struct drm_plane_state *old_plane_state, 10934 struct drm_plane_state *new_plane_state, 10935 bool enable, 10936 bool *lock_and_validation_needed, 10937 bool *is_top_most_overlay) 10938 { 10939 10940 struct dm_atomic_state *dm_state = NULL; 10941 struct drm_crtc *new_plane_crtc, *old_plane_crtc; 10942 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10943 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; 10944 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; 10945 bool needs_reset, update_native_cursor; 10946 int ret = 0; 10947 10948 10949 new_plane_crtc = new_plane_state->crtc; 10950 old_plane_crtc = old_plane_state->crtc; 10951 dm_new_plane_state = to_dm_plane_state(new_plane_state); 10952 dm_old_plane_state = to_dm_plane_state(old_plane_state); 10953 10954 update_native_cursor = dm_should_update_native_cursor(state, 10955 old_plane_crtc, 10956 new_plane_crtc, 10957 enable); 10958 10959 if (plane->type == DRM_PLANE_TYPE_CURSOR && update_native_cursor) { 10960 ret = dm_check_native_cursor_state(new_plane_crtc, plane, 10961 new_plane_state, enable); 10962 if (ret) 10963 return ret; 10964 10965 return 0; 10966 } 10967 10968 needs_reset = should_reset_plane(state, plane, old_plane_state, 10969 new_plane_state); 10970 10971 /* Remove any changed/removed planes */ 10972 if (!enable) { 10973 if (!needs_reset) 10974 return 0; 10975 10976 if (!old_plane_crtc) 10977 return 0; 10978 10979 old_crtc_state = drm_atomic_get_old_crtc_state( 10980 state, old_plane_crtc); 10981 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10982 10983 if (!dm_old_crtc_state->stream) 10984 return 0; 10985 10986 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", 10987 plane->base.id, old_plane_crtc->base.id); 10988 10989 ret = dm_atomic_get_state(state, &dm_state); 10990 if (ret) 10991 return ret; 10992 10993 if (!dc_state_remove_plane( 10994 dc, 10995 dm_old_crtc_state->stream, 10996 dm_old_plane_state->dc_state, 10997 dm_state->context)) { 10998 10999 return -EINVAL; 11000 } 11001 11002 if (dm_old_plane_state->dc_state) 11003 dc_plane_state_release(dm_old_plane_state->dc_state); 11004 11005 dm_new_plane_state->dc_state = NULL; 11006 11007 *lock_and_validation_needed = true; 11008 11009 } else { /* Add new planes */ 11010 struct dc_plane_state *dc_new_plane_state; 11011 11012 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 11013 return 0; 11014 11015 if (!new_plane_crtc) 11016 return 0; 11017 11018 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); 11019 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11020 11021 if (!dm_new_crtc_state->stream) 11022 return 0; 11023 11024 if (!needs_reset) 11025 return 0; 11026 11027 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); 11028 
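/* The plane's position and scaling have been validated against the CRTC state above; bail out on failure (for a cursor plane, the out: path below may still fall back to native cursor mode). */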
if (ret) 11029 goto out; 11030 11031 WARN_ON(dm_new_plane_state->dc_state); 11032 11033 dc_new_plane_state = dc_create_plane_state(dc); 11034 if (!dc_new_plane_state) { 11035 ret = -ENOMEM; 11036 goto out; 11037 } 11038 11039 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", 11040 plane->base.id, new_plane_crtc->base.id); 11041 11042 ret = fill_dc_plane_attributes( 11043 drm_to_adev(new_plane_crtc->dev), 11044 dc_new_plane_state, 11045 new_plane_state, 11046 new_crtc_state); 11047 if (ret) { 11048 dc_plane_state_release(dc_new_plane_state); 11049 goto out; 11050 } 11051 11052 ret = dm_atomic_get_state(state, &dm_state); 11053 if (ret) { 11054 dc_plane_state_release(dc_new_plane_state); 11055 goto out; 11056 } 11057 11058 /* 11059 * Any atomic check errors that occur after this will 11060 * not need a release. The plane state will be attached 11061 * to the stream, and therefore part of the atomic 11062 * state. It'll be released when the atomic state is 11063 * cleaned. 11064 */ 11065 if (!dc_state_add_plane( 11066 dc, 11067 dm_new_crtc_state->stream, 11068 dc_new_plane_state, 11069 dm_state->context)) { 11070 11071 dc_plane_state_release(dc_new_plane_state); 11072 ret = -EINVAL; 11073 goto out; 11074 } 11075 11076 dm_new_plane_state->dc_state = dc_new_plane_state; 11077 11078 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY); 11079 11080 /* Tell DC to do a full surface update every time there 11081 * is a plane change. Inefficient, but works for now. 11082 */ 11083 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; 11084 11085 *lock_and_validation_needed = true; 11086 } 11087 11088 out: 11089 /* If enabling cursor overlay failed, attempt fallback to native mode */ 11090 if (enable && ret == -EINVAL && plane->type == DRM_PLANE_TYPE_CURSOR) { 11091 ret = dm_check_native_cursor_state(new_plane_crtc, plane, 11092 new_plane_state, enable); 11093 if (ret) 11094 return ret; 11095 11096 dm_new_crtc_state->cursor_mode = DM_CURSOR_NATIVE_MODE; 11097 } 11098 11099 return ret; 11100 } 11101 11102 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, 11103 int *src_w, int *src_h) 11104 { 11105 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 11106 case DRM_MODE_ROTATE_90: 11107 case DRM_MODE_ROTATE_270: 11108 *src_w = plane_state->src_h >> 16; 11109 *src_h = plane_state->src_w >> 16; 11110 break; 11111 case DRM_MODE_ROTATE_0: 11112 case DRM_MODE_ROTATE_180: 11113 default: 11114 *src_w = plane_state->src_w >> 16; 11115 *src_h = plane_state->src_h >> 16; 11116 break; 11117 } 11118 } 11119 11120 static void 11121 dm_get_plane_scale(struct drm_plane_state *plane_state, 11122 int *out_plane_scale_w, int *out_plane_scale_h) 11123 { 11124 int plane_src_w, plane_src_h; 11125 11126 dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h); 11127 *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0; 11128 *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0; 11129 } 11130 11131 /* 11132 * The normalized_zpos value cannot be used by this iterator directly. It's only 11133 * calculated for enabled planes, potentially causing normalized_zpos collisions 11134 * between enabled/disabled planes in the atomic state. We need a unique value 11135 * so that the iterator will not generate the same object twice, or loop 11136 * indefinitely. 
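The (zpos, plane object ID) pair is unique for every plane in the state, so __get_next_zpos() below keys on that combination instead.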
11137 */ 11138 static inline struct __drm_planes_state *__get_next_zpos( 11139 struct drm_atomic_state *state, 11140 struct __drm_planes_state *prev) 11141 { 11142 unsigned int highest_zpos = 0, prev_zpos = 256; 11143 uint32_t highest_id = 0, prev_id = UINT_MAX; 11144 struct drm_plane_state *new_plane_state; 11145 struct drm_plane *plane; 11146 int i, highest_i = -1; 11147 11148 if (prev != NULL) { 11149 prev_zpos = prev->new_state->zpos; 11150 prev_id = prev->ptr->base.id; 11151 } 11152 11153 for_each_new_plane_in_state(state, plane, new_plane_state, i) { 11154 /* Skip planes with a higher zpos than the previously returned one */ 11155 if (new_plane_state->zpos > prev_zpos || 11156 (new_plane_state->zpos == prev_zpos && 11157 plane->base.id >= prev_id)) 11158 continue; 11159 11160 /* Save the index of the plane with the highest zpos */ 11161 if (new_plane_state->zpos > highest_zpos || 11162 (new_plane_state->zpos == highest_zpos && 11163 plane->base.id > highest_id)) { 11164 highest_zpos = new_plane_state->zpos; 11165 highest_id = plane->base.id; 11166 highest_i = i; 11167 } 11168 } 11169 11170 if (highest_i < 0) 11171 return NULL; 11172 11173 return &state->planes[highest_i]; 11174 } 11175 11176 /* 11177 * Use the uniqueness of the plane's (zpos, drm obj ID) combination to iterate 11178 * by descending zpos, as read from the new plane state. This is the same 11179 * ordering as defined by drm_atomic_normalize_zpos(). 11180 */ 11181 #define for_each_oldnew_plane_in_descending_zpos(__state, plane, old_plane_state, new_plane_state) \ 11182 for (struct __drm_planes_state *__i = __get_next_zpos((__state), NULL); \ 11183 __i != NULL; __i = __get_next_zpos((__state), __i)) \ 11184 for_each_if(((plane) = __i->ptr, \ 11185 (void)(plane) /* Only to avoid unused-but-set-variable warning */, \ 11186 (old_plane_state) = __i->old_state, \ 11187 (new_plane_state) = __i->new_state, 1)) 11188 11189 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) 11190 { 11191 struct drm_connector *connector; 11192 struct drm_connector_state *conn_state, *old_conn_state; 11193 struct amdgpu_dm_connector *aconnector = NULL; 11194 int i; 11195 11196 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { 11197 if (!conn_state->crtc) 11198 conn_state = old_conn_state; 11199 11200 if (conn_state->crtc != crtc) 11201 continue; 11202 11203 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 11204 continue; 11205 11206 aconnector = to_amdgpu_dm_connector(connector); 11207 if (!aconnector->mst_output_port || !aconnector->mst_root) 11208 aconnector = NULL; 11209 else 11210 break; 11211 } 11212 11213 if (!aconnector) 11214 return 0; 11215 11216 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr); 11217 } 11218 11219 /** 11220 * DOC: Cursor Modes - Native vs Overlay 11221 * 11222 * In native mode, the cursor uses an integrated cursor pipe within each DCN hw 11223 * plane. It does not require a dedicated hw plane to enable, but it is 11224 * subject to the same z-order and scaling as the hw plane. It also has format 11225 * restrictions: an RGB cursor in native mode cannot be enabled within a non-RGB 11226 * hw plane. 11227 * 11228 * In overlay mode, the cursor uses a separate DCN hw plane, and thus has its 11229 * own scaling and z-pos. It also has no blending restrictions. This results in 11230 * cursor behavior more akin to a DRM client's expectations.
However, it does 11231 * occupy an extra DCN plane, and therefore will only be used if a DCN plane is 11232 * available. 11233 */ 11234 11235 /** 11236 * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc 11237 * @adev: amdgpu device 11238 * @state: DRM atomic state 11239 * @dm_crtc_state: amdgpu state for the CRTC containing the cursor 11240 * @cursor_mode: Returns the required cursor mode on dm_crtc_state 11241 * 11242 * Determine whether the cursor should be enabled in native or overlay mode on 11243 * the dm_crtc_state. 11244 * 11245 * The cursor should be enabled in overlay mode if there exists an underlying 11246 * plane - on which the cursor may be blended - that is either YUV formatted or 11247 * scaled differently from the cursor. 11248 * 11249 * Since zpos info is required, drm_atomic_normalize_zpos must be called before 11250 * calling this function. 11251 * 11252 * Return: 0 on success, or an error code if getting the cursor plane state 11253 * failed. 11254 */ 11255 static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev, 11256 struct drm_atomic_state *state, 11257 struct dm_crtc_state *dm_crtc_state, 11258 enum amdgpu_dm_cursor_mode *cursor_mode) 11259 { 11260 struct drm_plane_state *old_plane_state, *plane_state, *cursor_state; 11261 struct drm_crtc_state *crtc_state = &dm_crtc_state->base; 11262 struct drm_plane *plane; 11263 bool consider_mode_change = false; 11264 bool entire_crtc_covered = false; 11265 bool cursor_changed = false; 11266 int underlying_scale_w, underlying_scale_h; 11267 int cursor_scale_w, cursor_scale_h; 11268 int i; 11269 11270 /* Overlay cursor is not supported on HW before DCN. 11271 * DCN401 does not have the cursor-on-scaled-plane or cursor-on-yuv-plane restrictions 11272 * that previous DCN generations have, so enable native mode on DCN401 in addition to DCE. 11273 */ 11274 if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0 || 11275 amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) { 11276 *cursor_mode = DM_CURSOR_NATIVE_MODE; 11277 return 0; 11278 } 11279 11280 /* Init cursor_mode to the current mode */ 11281 *cursor_mode = dm_crtc_state->cursor_mode; 11282 11283 /* 11284 * Cursor mode can change if a plane's format changes, its scale changes, it is 11285 * enabled/disabled, or the z-order changes. 11286 */ 11287 for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) { 11288 int new_scale_w, new_scale_h, old_scale_w, old_scale_h; 11289 11290 /* Only care about planes on this CRTC */ 11291 if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0) 11292 continue; 11293 11294 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11295 cursor_changed = true; 11296 11297 if (drm_atomic_plane_enabling(old_plane_state, plane_state) || 11298 drm_atomic_plane_disabling(old_plane_state, plane_state) || 11299 old_plane_state->fb->format != plane_state->fb->format) { 11300 consider_mode_change = true; 11301 break; 11302 } 11303 11304 dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h); 11305 dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h); 11306 if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) { 11307 consider_mode_change = true; 11308 break; 11309 } 11310 } 11311 11312 if (!consider_mode_change && !crtc_state->zpos_changed) 11313 return 0; 11314 11315 /* 11316 * If the cursor did not change on this CRTC and is not enabled on it, there is 11317 * no need to set the cursor mode. This avoids needlessly locking the cursor 11318 * state.
11319 */ 11320 if (!cursor_changed && 11321 !(drm_plane_mask(crtc_state->crtc->cursor) & crtc_state->plane_mask)) { 11322 return 0; 11323 } 11324 11325 cursor_state = drm_atomic_get_plane_state(state, 11326 crtc_state->crtc->cursor); 11327 if (IS_ERR(cursor_state)) 11328 return PTR_ERR(cursor_state); 11329 11330 /* Cursor is disabled */ 11331 if (!cursor_state->fb) 11332 return 0; 11333 11334 /* For all planes in descending z-order (all of which are below the cursor 11335 * as per the zpos definitions), check their scaling and format 11336 */ 11337 for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, plane_state) { 11338 11339 /* Only care about non-cursor planes on this CRTC */ 11340 if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0 || 11341 plane->type == DRM_PLANE_TYPE_CURSOR) 11342 continue; 11343 11344 /* Underlying plane is YUV format - use overlay cursor */ 11345 if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) { 11346 *cursor_mode = DM_CURSOR_OVERLAY_MODE; 11347 return 0; 11348 } 11349 11350 dm_get_plane_scale(plane_state, 11351 &underlying_scale_w, &underlying_scale_h); 11352 dm_get_plane_scale(cursor_state, 11353 &cursor_scale_w, &cursor_scale_h); 11354 11355 /* Underlying plane has a different scale - use overlay cursor */ 11356 if (cursor_scale_w != underlying_scale_w && 11357 cursor_scale_h != underlying_scale_h) { 11358 *cursor_mode = DM_CURSOR_OVERLAY_MODE; 11359 return 0; 11360 } 11361 11362 /* If this plane covers the whole CRTC, there is no need to check the planes underneath */ 11363 if (plane_state->crtc_x <= 0 && plane_state->crtc_y <= 0 && 11364 plane_state->crtc_x + plane_state->crtc_w >= crtc_state->mode.hdisplay && 11365 plane_state->crtc_y + plane_state->crtc_h >= crtc_state->mode.vdisplay) { 11366 entire_crtc_covered = true; 11367 break; 11368 } 11369 } 11370 11371 /* If the planes do not cover the entire CRTC, use overlay mode to enable the 11372 * cursor over holes 11373 */ 11374 if (entire_crtc_covered) 11375 *cursor_mode = DM_CURSOR_NATIVE_MODE; 11376 else 11377 *cursor_mode = DM_CURSOR_OVERLAY_MODE; 11378 11379 return 0; 11380 } 11381 11382 /** 11383 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. 11384 * 11385 * @dev: The DRM device 11386 * @state: The atomic state to commit 11387 * 11388 * Validate that the given atomic state is programmable by DC into hardware. 11389 * This involves constructing a &struct dc_state reflecting the new hardware 11390 * state we wish to commit, then querying DC to see if it is programmable. It's 11391 * important not to modify the existing DC state. Otherwise, atomic_check 11392 * may unexpectedly commit hardware changes. 11393 * 11394 * When validating the DC state, it's important that the right locks are 11395 * acquired. For the full update case, which removes/adds/updates streams on one 11396 * CRTC while flipping on another CRTC, acquiring the global lock guarantees 11397 * that any such full update commit will wait for completion of any outstanding 11398 * flip using DRM's synchronization events. 11399 * 11400 * Note that DM adds the affected connectors for all CRTCs in the state, when that 11401 * might not seem necessary. This is because DC stream creation requires the 11402 * DC sink, which is tied to the DRM connector state. Cleaning this up should 11403 * be possible but non-trivial - a possible TODO item. 11404 * 11405 * Return: 0 on success, or a negative error code if validation failed.
11406 */ 11407 static int amdgpu_dm_atomic_check(struct drm_device *dev, 11408 struct drm_atomic_state *state) 11409 { 11410 struct amdgpu_device *adev = drm_to_adev(dev); 11411 struct dm_atomic_state *dm_state = NULL; 11412 struct dc *dc = adev->dm.dc; 11413 struct drm_connector *connector; 11414 struct drm_connector_state *old_con_state, *new_con_state; 11415 struct drm_crtc *crtc; 11416 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 11417 struct drm_plane *plane; 11418 struct drm_plane_state *old_plane_state, *new_plane_state, *new_cursor_state; 11419 enum dc_status status; 11420 int ret, i; 11421 bool lock_and_validation_needed = false; 11422 bool is_top_most_overlay = true; 11423 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 11424 struct drm_dp_mst_topology_mgr *mgr; 11425 struct drm_dp_mst_topology_state *mst_state; 11426 struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0}; 11427 11428 trace_amdgpu_dm_atomic_check_begin(state); 11429 11430 ret = drm_atomic_helper_check_modeset(dev, state); 11431 if (ret) { 11432 drm_dbg_atomic(dev, "drm_atomic_helper_check_modeset() failed\n"); 11433 goto fail; 11434 } 11435 11436 /* Check connector changes */ 11437 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 11438 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 11439 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 11440 11441 /* Skip connectors that are disabled or part of modeset already. */ 11442 if (!new_con_state->crtc) 11443 continue; 11444 11445 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); 11446 if (IS_ERR(new_crtc_state)) { 11447 drm_dbg_atomic(dev, "drm_atomic_get_crtc_state() failed\n"); 11448 ret = PTR_ERR(new_crtc_state); 11449 goto fail; 11450 } 11451 11452 if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || 11453 dm_old_con_state->scaling != dm_new_con_state->scaling) 11454 new_crtc_state->connectors_changed = true; 11455 } 11456 11457 if (dc_resource_is_dsc_encoding_supported(dc)) { 11458 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11459 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { 11460 ret = add_affected_mst_dsc_crtcs(state, crtc); 11461 if (ret) { 11462 drm_dbg_atomic(dev, "add_affected_mst_dsc_crtcs() failed\n"); 11463 goto fail; 11464 } 11465 } 11466 } 11467 } 11468 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11469 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 11470 11471 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 11472 !new_crtc_state->color_mgmt_changed && 11473 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled && 11474 dm_old_crtc_state->dsc_force_changed == false) 11475 continue; 11476 11477 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); 11478 if (ret) { 11479 drm_dbg_atomic(dev, "amdgpu_dm_verify_lut_sizes() failed\n"); 11480 goto fail; 11481 } 11482 11483 if (!new_crtc_state->enable) 11484 continue; 11485 11486 ret = drm_atomic_add_affected_connectors(state, crtc); 11487 if (ret) { 11488 drm_dbg_atomic(dev, "drm_atomic_add_affected_connectors() failed\n"); 11489 goto fail; 11490 } 11491 11492 ret = drm_atomic_add_affected_planes(state, crtc); 11493 if (ret) { 11494 drm_dbg_atomic(dev, "drm_atomic_add_affected_planes() failed\n"); 11495 goto fail; 11496 } 11497 11498 if (dm_old_crtc_state->dsc_force_changed) 11499 new_crtc_state->mode_changed = true; 11500 } 11501 11502 /* 11503 * Add all 
primary and overlay planes on the CRTC to the state 11504 * whenever a plane is enabled to maintain correct z-ordering 11505 * and to enable fast surface updates. 11506 */ 11507 drm_for_each_crtc(crtc, dev) { 11508 bool modified = false; 11509 11510 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 11511 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11512 continue; 11513 11514 if (new_plane_state->crtc == crtc || 11515 old_plane_state->crtc == crtc) { 11516 modified = true; 11517 break; 11518 } 11519 } 11520 11521 if (!modified) 11522 continue; 11523 11524 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 11525 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11526 continue; 11527 11528 new_plane_state = 11529 drm_atomic_get_plane_state(state, plane); 11530 11531 if (IS_ERR(new_plane_state)) { 11532 ret = PTR_ERR(new_plane_state); 11533 drm_dbg_atomic(dev, "new_plane_state is BAD\n"); 11534 goto fail; 11535 } 11536 } 11537 } 11538 11539 /* 11540 * DC consults the zpos (layer_index in DC terminology) to determine the 11541 * hw plane on which to enable the hw cursor (see 11542 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in 11543 * atomic state, so call drm helper to normalize zpos. 11544 */ 11545 ret = drm_atomic_normalize_zpos(dev, state); 11546 if (ret) { 11547 drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n"); 11548 goto fail; 11549 } 11550 11551 /* 11552 * Determine whether cursors on each CRTC should be enabled in native or 11553 * overlay mode. 11554 */ 11555 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11556 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11557 11558 ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state, 11559 &dm_new_crtc_state->cursor_mode); 11560 if (ret) { 11561 drm_dbg(dev, "Failed to determine cursor mode\n"); 11562 goto fail; 11563 } 11564 11565 /* 11566 * If overlay cursor is needed, DC cannot go through the 11567 * native cursor update path. 
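The cursor is then handled as a regular DC plane (see the DOC: Cursor Modes comment above).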
All enabled planes on the CRTC 11568 * need to be added so that DC does not disable a plane by mistake 11569 */ 11570 if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) { 11571 ret = drm_atomic_add_affected_planes(state, crtc); 11572 if (ret) 11573 goto fail; 11574 } 11575 } 11576 11577 /* Remove existing planes if they are modified */ 11578 for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) { 11579 if (old_plane_state->fb && new_plane_state->fb && 11580 get_mem_type(old_plane_state->fb) != 11581 get_mem_type(new_plane_state->fb)) 11582 lock_and_validation_needed = true; 11583 11584 ret = dm_update_plane_state(dc, state, plane, 11585 old_plane_state, 11586 new_plane_state, 11587 false, 11588 &lock_and_validation_needed, 11589 &is_top_most_overlay); 11590 if (ret) { 11591 drm_dbg_atomic(dev, "dm_update_plane_state() failed\n"); 11592 goto fail; 11593 } 11594 } 11595 11596 /* Disable all CRTCs which require a disable */ 11597 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11598 ret = dm_update_crtc_state(&adev->dm, state, crtc, 11599 old_crtc_state, 11600 new_crtc_state, 11601 false, 11602 &lock_and_validation_needed); 11603 if (ret) { 11604 drm_dbg_atomic(dev, "DISABLE: dm_update_crtc_state() failed\n"); 11605 goto fail; 11606 } 11607 } 11608 11609 /* Enable all CRTCs which require an enable */ 11610 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11611 ret = dm_update_crtc_state(&adev->dm, state, crtc, 11612 old_crtc_state, 11613 new_crtc_state, 11614 true, 11615 &lock_and_validation_needed); 11616 if (ret) { 11617 drm_dbg_atomic(dev, "ENABLE: dm_update_crtc_state() failed\n"); 11618 goto fail; 11619 } 11620 } 11621 11622 /* Add new/modified planes */ 11623 for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) { 11624 ret = dm_update_plane_state(dc, state, plane, 11625 old_plane_state, 11626 new_plane_state, 11627 true, 11628 &lock_and_validation_needed, 11629 &is_top_most_overlay); 11630 if (ret) { 11631 drm_dbg_atomic(dev, "dm_update_plane_state() failed\n"); 11632 goto fail; 11633 } 11634 } 11635 11636 #if defined(CONFIG_DRM_AMD_DC_FP) 11637 if (dc_resource_is_dsc_encoding_supported(dc)) { 11638 ret = pre_validate_dsc(state, &dm_state, vars); 11639 if (ret != 0) 11640 goto fail; 11641 } 11642 #endif 11643 11644 /* Run this here since we want to validate the streams we created */ 11645 ret = drm_atomic_helper_check_planes(dev, state); 11646 if (ret) { 11647 drm_dbg_atomic(dev, "drm_atomic_helper_check_planes() failed\n"); 11648 goto fail; 11649 } 11650 11651 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11652 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11653 if (dm_new_crtc_state->mpo_requested) 11654 drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc); 11655 } 11656 11657 /* Check cursor restrictions */ 11658 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11659 enum amdgpu_dm_cursor_mode required_cursor_mode; 11660 int is_rotated, is_scaled; 11661 11662 /* Overlay cursor is not subject to native cursor restrictions */ 11663 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11664 if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) 11665 continue; 11666 11667 /* Check if rotation or scaling is enabled on DCN401 */ 11668 if ((drm_plane_mask(crtc->cursor) & new_crtc_state->plane_mask) && 11669 amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) { 11670 new_cursor_state =
drm_atomic_get_new_plane_state(state, crtc->cursor); 11671 11672 is_rotated = new_cursor_state && 11673 ((new_cursor_state->rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0); 11674 is_scaled = new_cursor_state && ((new_cursor_state->src_w >> 16 != new_cursor_state->crtc_w) || 11675 (new_cursor_state->src_h >> 16 != new_cursor_state->crtc_h)); 11676 11677 if (is_rotated || is_scaled) { 11678 drm_dbg_driver( 11679 crtc->dev, 11680 "[CRTC:%d:%s] cannot enable hardware cursor due to rotation/scaling\n", 11681 crtc->base.id, crtc->name); 11682 ret = -EINVAL; 11683 goto fail; 11684 } 11685 } 11686 11687 /* If HW can only do native cursor, check the restrictions again */ 11688 ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state, 11689 &required_cursor_mode); 11690 if (ret) { 11691 drm_dbg_driver(crtc->dev, 11692 "[CRTC:%d:%s] Checking cursor mode failed\n", 11693 crtc->base.id, crtc->name); 11694 goto fail; 11695 } else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) { 11696 drm_dbg_driver(crtc->dev, 11697 "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n", 11698 crtc->base.id, crtc->name); 11699 ret = -EINVAL; 11700 goto fail; 11701 } 11702 } 11703 11704 if (state->legacy_cursor_update) { 11705 /* 11706 * This is a fast cursor update coming from the plane update 11707 * helper; check if it can be done asynchronously for better 11708 * performance. 11709 */ 11710 state->async_update = 11711 !drm_atomic_helper_async_check(dev, state); 11712 11713 /* 11714 * Skip the remaining global validation if this is an async 11715 * update. Cursor updates can be done without affecting 11716 * state or bandwidth calcs and this avoids the performance 11717 * penalty of locking the private state object and 11718 * allocating a new dc_state. 11719 */ 11720 if (state->async_update) 11721 return 0; 11722 } 11723 11724 /* Check scaling and underscan changes */ 11725 /* TODO: Removed scaling changes validation due to the inability to commit 11726 * a new stream into the context w/o causing a full reset. Need to 11727 * decide how to handle.
11728 */ 11729 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 11730 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 11731 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 11732 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 11733 11734 /* Skip any modesets/resets */ 11735 if (!acrtc || drm_atomic_crtc_needs_modeset( 11736 drm_atomic_get_new_crtc_state(state, &acrtc->base))) 11737 continue; 11738 11739 /* Skip anything that is not a scaling or underscan change */ 11740 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) 11741 continue; 11742 11743 lock_and_validation_needed = true; 11744 } 11745 11746 /* Set the slot info for each mst_state based on the link encoding format */ 11747 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { 11748 struct amdgpu_dm_connector *aconnector; 11749 struct drm_connector *connector; 11750 struct drm_connector_list_iter iter; 11751 u8 link_coding_cap; 11752 11753 drm_connector_list_iter_begin(dev, &iter); 11754 drm_for_each_connector_iter(connector, &iter) { 11755 if (connector->index == mst_state->mgr->conn_base_id) { 11756 aconnector = to_amdgpu_dm_connector(connector); 11757 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); 11758 drm_dp_mst_update_slots(mst_state, link_coding_cap); 11759 11760 break; 11761 } 11762 } 11763 drm_connector_list_iter_end(&iter); 11764 } 11765 11766 /* 11767 * Streams and planes are reset when there are changes that affect 11768 * bandwidth. Anything that affects bandwidth needs to go through 11769 * DC global validation to ensure that the configuration can be applied 11770 * to hardware. 11771 * 11772 * We currently have to stall out here in atomic_check for outstanding 11773 * commits to finish in this case because our IRQ handlers reference 11774 * DRM state directly - we can end up disabling interrupts too early 11775 * if we don't. 11776 * 11777 * TODO: Remove this stall and drop DM state private objects. 11778 */ 11779 if (lock_and_validation_needed) { 11780 ret = dm_atomic_get_state(state, &dm_state); 11781 if (ret) { 11782 drm_dbg_atomic(dev, "dm_atomic_get_state() failed\n"); 11783 goto fail; 11784 } 11785 11786 ret = do_aquire_global_lock(dev, state); 11787 if (ret) { 11788 drm_dbg_atomic(dev, "do_aquire_global_lock() failed\n"); 11789 goto fail; 11790 } 11791 11792 #if defined(CONFIG_DRM_AMD_DC_FP) 11793 if (dc_resource_is_dsc_encoding_supported(dc)) { 11794 ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); 11795 if (ret) { 11796 drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n"); 11797 ret = -EINVAL; 11798 goto fail; 11799 } 11800 } 11801 #endif 11802 11803 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); 11804 if (ret) { 11805 drm_dbg_atomic(dev, "dm_update_mst_vcpi_slots_for_dsc() failed\n"); 11806 goto fail; 11807 } 11808 11809 /* 11810 * Perform validation of the MST topology in the state: 11811 * We need to perform the MST atomic check before calling 11812 * dc_validate_global_state(), or there is a chance 11813 * to get stuck in an infinite loop and hang eventually.
11814 */ 11815 ret = drm_dp_mst_atomic_check(state); 11816 if (ret) { 11817 drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n"); 11818 goto fail; 11819 } 11820 status = dc_validate_global_state(dc, dm_state->context, true); 11821 if (status != DC_OK) { 11822 drm_dbg_atomic(dev, "DC global validation failure: %s (%d)", 11823 dc_status_to_str(status), status); 11824 ret = -EINVAL; 11825 goto fail; 11826 } 11827 } else { 11828 /* 11829 * The commit is a fast update. Fast updates shouldn't change 11830 * the DC context, affect global validation, and can have their 11831 * commit work done in parallel with other commits not touching 11832 * the same resource. If we have a new DC context as part of 11833 * the DM atomic state from validation we need to free it and 11834 * retain the existing one instead. 11835 * 11836 * Furthermore, since the DM atomic state only contains the DC 11837 * context and can safely be annulled, we can free the state 11838 * and clear the associated private object now to free 11839 * some memory and avoid a possible use-after-free later. 11840 */ 11841 11842 for (i = 0; i < state->num_private_objs; i++) { 11843 struct drm_private_obj *obj = state->private_objs[i].ptr; 11844 11845 if (obj->funcs == adev->dm.atomic_obj.funcs) { 11846 int j = state->num_private_objs-1; 11847 11848 dm_atomic_destroy_state(obj, 11849 state->private_objs[i].state); 11850 11851 /* If i is not at the end of the array then the 11852 * last element needs to be moved to where i was 11853 * before the array can safely be truncated. 11854 */ 11855 if (i != j) 11856 state->private_objs[i] = 11857 state->private_objs[j]; 11858 11859 state->private_objs[j].ptr = NULL; 11860 state->private_objs[j].state = NULL; 11861 state->private_objs[j].old_state = NULL; 11862 state->private_objs[j].new_state = NULL; 11863 11864 state->num_private_objs = j; 11865 break; 11866 } 11867 } 11868 } 11869 11870 /* Store the overall update type for use later in atomic check. */ 11871 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11872 struct dm_crtc_state *dm_new_crtc_state = 11873 to_dm_crtc_state(new_crtc_state); 11874 11875 /* 11876 * Only allow async flips for fast updates that don't change 11877 * the FB pitch, the DCC state, rotation, etc. 11878 */ 11879 if (new_crtc_state->async_flip && lock_and_validation_needed) { 11880 drm_dbg_atomic(crtc->dev, 11881 "[CRTC:%d:%s] async flips are only supported for fast updates\n", 11882 crtc->base.id, crtc->name); 11883 ret = -EINVAL; 11884 goto fail; 11885 } 11886 11887 dm_new_crtc_state->update_type = lock_and_validation_needed ? 
11888 UPDATE_TYPE_FULL : UPDATE_TYPE_FAST; 11889 } 11890 11891 /* Must be success */ 11892 WARN_ON(ret); 11893 11894 trace_amdgpu_dm_atomic_check_finish(state, ret); 11895 11896 return ret; 11897 11898 fail: 11899 if (ret == -EDEADLK) 11900 drm_dbg_atomic(dev, "Atomic check stopped to avoid deadlock.\n"); 11901 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) 11902 drm_dbg_atomic(dev, "Atomic check stopped due to signal.\n"); 11903 else 11904 drm_dbg_atomic(dev, "Atomic check failed with err: %d\n", ret); 11905 11906 trace_amdgpu_dm_atomic_check_finish(state, ret); 11907 11908 return ret; 11909 } 11910 11911 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, 11912 unsigned int offset, 11913 unsigned int total_length, 11914 u8 *data, 11915 unsigned int length, 11916 struct amdgpu_hdmi_vsdb_info *vsdb) 11917 { 11918 bool res; 11919 union dmub_rb_cmd cmd; 11920 struct dmub_cmd_send_edid_cea *input; 11921 struct dmub_cmd_edid_cea_output *output; 11922 11923 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES) 11924 return false; 11925 11926 memset(&cmd, 0, sizeof(cmd)); 11927 11928 input = &cmd.edid_cea.data.input; 11929 11930 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA; 11931 cmd.edid_cea.header.sub_type = 0; 11932 cmd.edid_cea.header.payload_bytes = 11933 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header); 11934 input->offset = offset; 11935 input->length = length; 11936 input->cea_total_length = total_length; 11937 memcpy(input->payload, data, length); 11938 11939 res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); 11940 if (!res) { 11941 DRM_ERROR("EDID CEA parser failed\n"); 11942 return false; 11943 } 11944 11945 output = &cmd.edid_cea.data.output; 11946 11947 if (output->type == DMUB_CMD__EDID_CEA_ACK) { 11948 if (!output->ack.success) { 11949 DRM_ERROR("EDID CEA ack failed at offset %d\n", 11950 output->ack.offset); 11951 } 11952 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) { 11953 if (!output->amd_vsdb.vsdb_found) 11954 return false; 11955 11956 vsdb->freesync_supported = output->amd_vsdb.freesync_supported; 11957 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version; 11958 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate; 11959 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate; 11960 } else { 11961 DRM_WARN("Unknown EDID CEA parser results\n"); 11962 return false; 11963 } 11964 11965 return true; 11966 } 11967 11968 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, 11969 u8 *edid_ext, int len, 11970 struct amdgpu_hdmi_vsdb_info *vsdb_info) 11971 { 11972 int i; 11973 11974 /* send the extension block to the DMCU for parsing */ 11975 for (i = 0; i < len; i += 8) { 11976 bool res; 11977 int offset; 11978 11979 /* send 8 bytes at a time */ 11980 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8)) 11981 return false; 11982 11983 if (i+8 == len) { 11984 /* EDID block send completed; expect the result */ 11985 int version, min_rate, max_rate; 11986 11987 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate); 11988 if (res) { 11989 /* amd vsdb found */ 11990 vsdb_info->freesync_supported = 1; 11991 vsdb_info->amd_vsdb_version = version; 11992 vsdb_info->min_refresh_rate_hz = min_rate; 11993 vsdb_info->max_refresh_rate_hz = max_rate; 11994 return true; 11995 } 11996 /* not an amd vsdb */ 11997 return false; 11998 } 11999 12000 /* check for ack */ 12001 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset); 12002 if (!res) 12003 return false; 12004 } 12005 12006
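/* The loop finished without receiving a final parse result */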
return false; 12007 } 12008 12009 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, 12010 u8 *edid_ext, int len, 12011 struct amdgpu_hdmi_vsdb_info *vsdb_info) 12012 { 12013 int i; 12014 12015 /* send the extension block to the DMUB for parsing */ 12016 for (i = 0; i < len; i += 8) { 12017 /* send 8 bytes at a time */ 12018 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info)) 12019 return false; 12020 } 12021 12022 return vsdb_info->freesync_supported; 12023 } 12024 12025 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, 12026 u8 *edid_ext, int len, 12027 struct amdgpu_hdmi_vsdb_info *vsdb_info) 12028 { 12029 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); 12030 bool ret; 12031 12032 mutex_lock(&adev->dm.dc_lock); 12033 if (adev->dm.dmub_srv) 12034 ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); 12035 else 12036 ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); 12037 mutex_unlock(&adev->dm.dc_lock); 12038 return ret; 12039 } 12040 12041 static void parse_edid_displayid_vrr(struct drm_connector *connector, 12042 const struct edid *edid) 12043 { 12044 u8 *edid_ext = NULL; 12045 int i; 12046 int j = 0; 12047 u16 min_vfreq; 12048 u16 max_vfreq; 12049 12050 if (edid == NULL || edid->extensions == 0) 12051 return; 12052 12053 /* Find the DisplayID extension */ 12054 for (i = 0; i < edid->extensions; i++) { 12055 edid_ext = (void *)(edid + (i + 1)); 12056 if (edid_ext[0] == DISPLAYID_EXT) 12057 break; 12058 } 12059 12060 if (edid_ext == NULL) 12061 return; 12062 12063 while (j < EDID_LENGTH) { 12064 /* Get the dynamic video timing range from DisplayID if available */ 12065 if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 && 12066 (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) { 12067 min_vfreq = edid_ext[j+9]; 12068 if (edid_ext[j+1] & 7) 12069 max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8); 12070 else 12071 max_vfreq = edid_ext[j+10]; 12072 12073 if (max_vfreq && min_vfreq) { 12074 connector->display_info.monitor_range.max_vfreq = max_vfreq; 12075 connector->display_info.monitor_range.min_vfreq = min_vfreq; 12076 12077 return; 12078 } 12079 } 12080 j++; 12081 } 12082 } 12083 12084 static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, 12085 const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) 12086 { 12087 u8 *edid_ext = NULL; 12088 int i; 12089 int j = 0; 12090 12091 if (edid == NULL || edid->extensions == 0) 12092 return -ENODEV; 12093 12094 /* Find the DisplayID extension */ 12095 for (i = 0; i < edid->extensions; i++) { 12096 edid_ext = (void *)(edid + (i + 1)); 12097 if (edid_ext[0] == DISPLAYID_EXT) 12098 break; 12099 } 12100 12101 while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) { 12102 struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j]; 12103 unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]); 12104 12105 if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID && 12106 amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) { 12107 vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ?
true : false; 12108 vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3; 12109 DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode); 12110 12111 return true; 12112 } 12113 j++; 12114 } 12115 12116 return false; 12117 } 12118 12119 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, 12120 const struct edid *edid, 12121 struct amdgpu_hdmi_vsdb_info *vsdb_info) 12122 { 12123 u8 *edid_ext = NULL; 12124 int i; 12125 bool valid_vsdb_found = false; 12126 12127 /*----- drm_find_cea_extension() -----*/ 12128 /* No EDID or no EDID extensions */ 12129 if (edid == NULL || edid->extensions == 0) 12130 return -ENODEV; 12131 12132 /* Find the CEA extension */ 12133 for (i = 0; i < edid->extensions; i++) { 12134 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1); 12135 if (edid_ext[0] == CEA_EXT) 12136 break; 12137 } 12138 12139 if (i == edid->extensions) 12140 return -ENODEV; 12141 12142 /*----- cea_db_offsets() -----*/ 12143 if (edid_ext[0] != CEA_EXT) 12144 return -ENODEV; 12145 12146 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info); 12147 12148 return valid_vsdb_found ? i : -ENODEV; 12149 } 12150 12151 /** 12152 * amdgpu_dm_update_freesync_caps - Update FreeSync capabilities 12153 * 12154 * @connector: Connector to query. 12155 * @drm_edid: DRM EDID from the monitor 12156 * 12157 * Amdgpu supports FreeSync on DP and HDMI displays, and we need to keep 12158 * track of some of the display information in the internal data struct used by 12159 * amdgpu_dm. This function checks the connector type to determine how the 12160 * FreeSync parameters should be set. 12161 */ 12162 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, 12163 const struct drm_edid *drm_edid) 12164 { 12165 int i = 0; 12166 struct amdgpu_dm_connector *amdgpu_dm_connector = 12167 to_amdgpu_dm_connector(connector); 12168 struct dm_connector_state *dm_con_state = NULL; 12169 struct dc_sink *sink; 12170 struct amdgpu_device *adev = drm_to_adev(connector->dev); 12171 struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; 12172 const struct edid *edid; 12173 bool freesync_capable = false; 12174 enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; 12175 12176 if (!connector->state) { 12177 DRM_ERROR("%s - Connector has no state", __func__); 12178 goto update; 12179 } 12180 12181 sink = amdgpu_dm_connector->dc_sink ?
12182 amdgpu_dm_connector->dc_sink : 12183 amdgpu_dm_connector->dc_em_sink; 12184 12185 drm_edid_connector_update(connector, drm_edid); 12186 12187 if (!drm_edid || !sink) { 12188 dm_con_state = to_dm_connector_state(connector->state); 12189 12190 amdgpu_dm_connector->min_vfreq = 0; 12191 amdgpu_dm_connector->max_vfreq = 0; 12192 freesync_capable = false; 12193 12194 goto update; 12195 } 12196 12197 dm_con_state = to_dm_connector_state(connector->state); 12198 12199 if (!adev->dm.freesync_module) 12200 goto update; 12201 12202 edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() 12203 12204 /* Some eDP panels only have the refresh rate range info in DisplayID */ 12205 if ((connector->display_info.monitor_range.min_vfreq == 0 || 12206 connector->display_info.monitor_range.max_vfreq == 0)) 12207 parse_edid_displayid_vrr(connector, edid); 12208 12209 if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || 12210 sink->sink_signal == SIGNAL_TYPE_EDP)) { 12211 amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; 12212 amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; 12213 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 12214 freesync_capable = true; 12215 parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 12216 12217 if (vsdb_info.replay_mode) { 12218 amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode; 12219 amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version; 12220 amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; 12221 } 12222 12223 } else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { 12224 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 12225 if (i >= 0 && vsdb_info.freesync_supported) { 12226 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; 12227 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; 12228 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 12229 freesync_capable = true; 12230 12231 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; 12232 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; 12233 } 12234 } 12235 12236 if (amdgpu_dm_connector->dc_link) 12237 as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); 12238 12239 if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { 12240 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 12241 if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) { 12242 12243 amdgpu_dm_connector->pack_sdp_v1_3 = true; 12244 amdgpu_dm_connector->as_type = as_type; 12245 amdgpu_dm_connector->vsdb_info = vsdb_info; 12246 12247 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; 12248 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; 12249 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 12250 freesync_capable = true; 12251 12252 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; 12253 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; 12254 } 12255 } 12256 12257 update: 12258 if (dm_con_state) 12259 dm_con_state->freesync_capable = freesync_capable; 12260 12261 if (connector->state && amdgpu_dm_connector->dc_link && !freesync_capable && 12262 amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported) { 12263 amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported = false; 
	if (connector->state && amdgpu_dm_connector->dc_link && !freesync_capable &&
	    amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported) {
		amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported = false;
		amdgpu_dm_connector->dc_link->replay_settings.replay_feature_enabled = false;
	}

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
{
	if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
		dc_exit_ips_for_hw_access(dc);
}

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       u32 value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		drm_err(adev_to_drm(ctx->driver_context),
			"invalid register write; address = 0\n");
		return;
	}
#endif

	amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	u32 value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		drm_err(adev_to_drm(ctx->driver_context),
			"invalid register read; address = 0\n");
		return 0;
	}
#endif

	/*
	 * Reads are not expected while a DMUB register-offload gather is in
	 * progress, unless burst writes are allowed.
	 */
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	amdgpu_dm_exit_ips_for_hw_access(ctx->dc);

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

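/**
 * amdgpu_dm_process_dmub_aux_transfer_sync - run a DPIA AUX transfer via DMUB
 * @ctx: DC context the transfer is issued against
 * @link_index: index of the DC link carrying the AUX payload
 * @payload: AUX request; on a successful read the reply data is copied back
 * @operation_result: detailed AUX result code reported by DMUB
 *
 * Submits the transfer with dc_process_dmub_aux_transfer_async() and waits up
 * to 10 seconds for the DMUB notification that completes it.
 *
 * Return: length of the AUX reply on success, -1 on failure.
 */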
int amdgpu_dm_process_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;
	int ret = -1;

	mutex_lock(&adev->dm.dpia_aux_lock);
	if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
		*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		goto out;
	}

	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		DRM_ERROR("wait_for_completion_timeout timed out!");
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		goto out;
	}

	if (p_notify->result != AUX_RET_SUCCESS) {
		/*
		 * Transient states before tunneling is enabled could
		 * lead to this error. We can ignore this for now.
		 */
		if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
			DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
				 payload->address, payload->length,
				 p_notify->result);
		}
		*operation_result = AUX_RET_ERROR_INVALID_REPLY;
		goto out;
	}

	payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
	if (!payload->write && p_notify->aux_reply.length &&
	    (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {

		if (payload->length != p_notify->aux_reply.length) {
			DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
				 p_notify->aux_reply.length,
				 payload->address, payload->length);
			*operation_result = AUX_RET_ERROR_INVALID_REPLY;
			goto out;
		}

		memcpy(payload->data, p_notify->aux_reply.data,
		       p_notify->aux_reply.length);
	}

	/* success */
	ret = p_notify->aux_reply.length;
	*operation_result = p_notify->result;
out:
	reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}

/**
 * amdgpu_dm_process_dmub_set_config_sync - run a DPIA SET_CONFIG request via
 *                                          DMUB and wait for its status
 * @ctx: DC context the request is issued against
 * @link_index: index of the DC link the request targets
 * @payload: SET_CONFIG command payload
 * @operation_result: SET_CONFIG status reported by DMUB
 *
 * Return: 0 once the status is available, -1 if waiting for it timed out.
 */
int amdgpu_dm_process_dmub_set_config_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	bool is_cmd_complete;
	int ret;

	mutex_lock(&adev->dm.dpia_aux_lock);
	is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
			link_index, payload, adev->dm.dmub_notify);

	if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		ret = 0;
		*operation_result = adev->dm.dmub_notify->sc_status;
	} else {
		DRM_ERROR("wait_for_completion_timeout timed out!");
		ret = -1;
		*operation_result = SET_CONFIG_UNKNOWN_ERROR;
	}

	if (!is_cmd_complete)
		reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}

bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
}

bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
}