/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_replay.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/sort.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_utils.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>

#include <media/cec-notifier.h>
#include <acpi/video.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
"dcn/dcn_1_0_sh_mask.h" 107 #include "soc15_hw_ip.h" 108 #include "soc15_common.h" 109 #include "vega10_ip_offset.h" 110 111 #include "gc/gc_11_0_0_offset.h" 112 #include "gc/gc_11_0_0_sh_mask.h" 113 114 #include "modules/inc/mod_freesync.h" 115 #include "modules/power/power_helpers.h" 116 117 static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch"); 118 119 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" 120 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); 121 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" 122 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB); 123 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin" 124 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB); 125 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin" 126 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB); 127 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin" 128 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB); 129 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin" 130 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB); 131 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin" 132 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB); 133 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin" 134 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB); 135 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin" 136 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB); 137 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin" 138 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB); 139 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin" 140 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB); 141 142 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin" 143 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB); 144 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin" 145 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB); 146 147 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" 148 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); 149 150 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" 151 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); 152 153 #define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin" 154 MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB); 155 156 #define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin" 157 MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB); 158 159 #define FIRMWARE_DCN_36_DMUB "amdgpu/dcn_3_6_dmcub.bin" 160 MODULE_FIRMWARE(FIRMWARE_DCN_36_DMUB); 161 162 #define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin" 163 MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB); 164 165 /* Number of bytes in PSP header for firmware. */ 166 #define PSP_HEADER_BYTES 0x100 167 168 /* Number of bytes in PSP footer for firmware. */ 169 #define PSP_FOOTER_BYTES 0x100 170 171 /** 172 * DOC: overview 173 * 174 * The AMDgpu display manager, **amdgpu_dm** (or even simpler, 175 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM 176 * requests into DC requests, and DC responses into DRM responses. 177 * 178 * The root control structure is &struct amdgpu_display_manager. 
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service, bool oem);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}
/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    u32 link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
					  int bl_idx,
					  u32 user_brightness);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc = NULL;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
			crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
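/*
 * dm_crtc_get_scanoutpos - read the current scanout position for a CRTC
 *
 * Queries DC for the vertical blank start/end and the current horizontal
 * and vertical beam position, then packs them back into the two-halfword
 * register layout the base driver still expects: low 16 bits carry the
 * vertical component, high 16 bits the horizontal (or vblank end)
 * component.
 */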
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
	struct amdgpu_crtc *acrtc = NULL;
	struct dc *dc = adev->dm.dc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
			crtc);
		return 0;
	}

	if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
		dc_allow_idle_optimizations(dc, false);

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}

static bool dm_is_idle(struct amdgpu_ip_block *ip_block)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
	return false;
}

static int dm_soft_reset(struct amdgpu_ip_block *ip_block)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->stream->adjust.timing_adjust_pending)
		return true;
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
		return true;
	else
		return false;
}

/*
 * DC will program planes with their z-order determined by their ordering
 * in the dc_surface_updates array. This comparator is used to sort them
 * by descending zpos.
 */
static int dm_plane_layer_index_cmp(const void *a, const void *b)
{
	const struct dc_surface_update *sa = (struct dc_surface_update *)a;
	const struct dc_surface_update *sb = (struct dc_surface_update *)b;

	/* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
	return sb->surface->layer_index - sa->surface->layer_index;
}
/**
 * update_planes_and_stream_adapter() - Send planes to be updated in DC
 *
 * DC has a generic way to update planes and stream via
 * dc_update_planes_and_stream function; however, DM might need some
 * adjustments and preparation before calling it. This function is a wrapper
 * for the dc_update_planes_and_stream that does any required configuration
 * before passing control to DC.
 *
 * @dc: Display Core control structure
 * @update_type: specify whether it is FULL/MEDIUM/FAST update
 * @planes_count: planes count to update
 * @stream: stream state
 * @stream_update: stream update
 * @array_of_surface_update: dc surface update pointer
 *
 */
static inline bool update_planes_and_stream_adapter(struct dc *dc,
						    int update_type,
						    int planes_count,
						    struct dc_stream_state *stream,
						    struct dc_stream_update *stream_update,
						    struct dc_surface_update *array_of_surface_update)
{
	sort(array_of_surface_update, planes_count,
	     sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);

	/*
	 * Previous frame finished and HW is ready for optimization.
	 */
	if (update_type == UPDATE_TYPE_FAST)
		dc_post_update_surfaces_to_stream(dc);

	return dc_update_planes_and_stream(dc,
					   array_of_surface_update,
					   planes_count,
					   stream,
					   stream_update);
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct drm_device *dev = adev_to_drm(adev);
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	u32 vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		drm_dbg_state(dev, "CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		drm_dbg_state(dev,
			      "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			      amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
			      amdgpu_crtc->crtc_id, amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_dbg_state(dev,
		      "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		      amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}
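/*
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate for tracing and, when VRR is active,
 * performs the core vblank handling that had to be deferred until after
 * the end of front-porch, including BTR processing on pre-DCE12 ASICs.
 */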
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = drm_crtc_vblank_crtc(&acrtc->base);
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
							frame_duration_ns,
							ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		drm_dbg_vbl(drm_dev,
			    "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
			    vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * now that it is done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			amdgpu_dm_crtc_handle_vblank(acrtc);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct drm_writeback_job *job;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	if (acrtc->wb_conn) {
		spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);

		if (acrtc->wb_pending) {
			job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
						       struct drm_writeback_job,
						       list_entry);
			acrtc->wb_pending = false;
			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);

			if (job) {
				unsigned int v_total, refresh_hz;
				struct dc_stream_state *stream = acrtc->dm_irq_params.stream;

				v_total = stream->adjust.v_total_max ?
					  stream->adjust.v_total_max : stream->timing.v_total;
				refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
					     100LL, (v_total * stream->timing.h_total));
				mdelay(1000 / refresh_hz);

				drm_writeback_signal_completion(acrtc->wb_conn, 0);
				dc_stream_fc_disable_writeback(adev->dm.dc,
							       acrtc->dm_irq_params.stream, 0);
			}
		} else
			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
	}

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);

	drm_dbg_vbl(adev_to_drm(adev),
		    "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		    vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		amdgpu_dm_crtc_handle_vblank(acrtc);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread and also signals the event to wake up the thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}
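/*
 * dmub_aux_fused_io_callback - Callback for a fused I/O reply from DMUB.
 *
 * Copies the fused request reply into the per-DDC-line synchronization
 * slot and completes it, so the thread blocked waiting for this reply can
 * proceed.
 */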
static void dmub_aux_fused_io_callback(struct amdgpu_device *adev,
				       struct dmub_notification *notify)
{
	if (!adev || !notify) {
		ASSERT(false);
		return;
	}

	const struct dmub_cmd_fused_request *req = &notify->fused_request;
	const uint8_t ddc_line = req->u.aux.ddc_line;

	if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) {
		ASSERT(false);
		return;
	}

	struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line];

	static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch");
	memcpy(sync->reply_data, req, sizeof(*req));
	complete(&sync->replied);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets display index through the
 * link index and calls helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	u8 link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		drm_err(adev_to_drm(adev), "DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
	if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
		drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n");
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			if (notify->type == DMUB_NOTIFICATION_HPD)
				drm_info(adev_to_drm(adev), "DMUB HPD IRQ callback: link_index=%u\n", link_index);
			else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
				drm_info(adev_to_drm(adev), "DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
			else
				drm_warn(adev_to_drm(adev), "DMUB Unknown HPD callback type %d, link_index=%u\n",
					 notify->type, link_index);

			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD) {
			if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
				drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index);
			handle_hpd_irq_helper(hpd_aconnector);
		} else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
			handle_hpd_rx_irq(hpd_aconnector);
		}
	}
}

/**
 * dmub_hpd_sense_callback - DMUB HPD sense processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * HPD sense changes can occur during low power states and need to be
 * notified from firmware to driver.
 */
static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
				    struct dmub_notification *notify)
{
	drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n");
}
/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets indicator whether callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 * Return: true if successfully registered, false otherwise
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		drm_err(adev_to_drm(dmub_hpd_wrk->adev), "dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
										      dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);

}

static const char *dmub_notification_type_str(enum dmub_notification_type e)
{
	switch (e) {
	case DMUB_NOTIFICATION_NO_DATA:
		return "NO_DATA";
	case DMUB_NOTIFICATION_AUX_REPLY:
		return "AUX_REPLY";
	case DMUB_NOTIFICATION_HPD:
		return "HPD";
	case DMUB_NOTIFICATION_HPD_IRQ:
		return "HPD_IRQ";
	case DMUB_NOTIFICATION_SET_CONFIG_REPLY:
		return "SET_CONFIG_REPLY";
	case DMUB_NOTIFICATION_DPIA_NOTIFICATION:
		return "DPIA_NOTIFICATION";
	case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY:
		return "HPD_SENSE_NOTIFY";
	case DMUB_NOTIFICATION_FUSED_IO:
		return "FUSED_IO";
	default:
		return "<unknown>";
	}
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining the DMCUB trace buffer and
 * dispatching pending DMUB notifications to their registered callbacks.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify = {0};
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	u32 count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
				       entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ");

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				drm_err(adev_to_drm(adev), "DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n",
					 dmub_notification_type_str(notify.type));
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
								    GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				dmub_hpd_wrk->adev = adev;
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}
}

static int dm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(struct amdgpu_ip_block *ip_block,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(struct amdgpu_ip_block *ip_block);
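/*
 * Sizing note for amdgpu_dm_fbc_init() below: the compressor BO is sized
 * for the largest mode in the connector's mode list, at 4 bytes per pixel,
 * so any of the advertised modes can be handled without reallocating.
 */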
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < (unsigned long) mode->htotal * mode->vtotal)
			max_size = (unsigned long) mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4);
		}

	}

}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		mutex_lock(&connector->eld_mutex);
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));
		mutex_unlock(&connector->eld_mutex);

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};
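/*
 * amdgpu_dm_audio_init() below registers DM with the DRM audio component
 * framework: once the audio driver binds through
 * amdgpu_dm_audio_component_bind_ops, it can query per-port ELDs via the
 * get_eld() hook above and receives hotplug updates through
 * amdgpu_dm_audio_eld_notify().
 */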
&amdgpu_dm_audio_component_bind_ops); 1165 if (ret < 0) 1166 return ret; 1167 1168 adev->dm.audio_registered = true; 1169 1170 return 0; 1171 } 1172 1173 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev) 1174 { 1175 if (!amdgpu_audio) 1176 return; 1177 1178 if (!adev->mode_info.audio.enabled) 1179 return; 1180 1181 if (adev->dm.audio_registered) { 1182 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops); 1183 adev->dm.audio_registered = false; 1184 } 1185 1186 /* TODO: Disable audio? */ 1187 1188 adev->mode_info.audio.enabled = false; 1189 } 1190 1191 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) 1192 { 1193 struct drm_audio_component *acomp = adev->dm.audio_component; 1194 1195 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) { 1196 DRM_DEBUG_KMS("Notify ELD: %d\n", pin); 1197 1198 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, 1199 pin, -1); 1200 } 1201 } 1202 1203 static int dm_dmub_hw_init(struct amdgpu_device *adev) 1204 { 1205 const struct dmcub_firmware_header_v1_0 *hdr; 1206 struct dmub_srv *dmub_srv = adev->dm.dmub_srv; 1207 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; 1208 const struct firmware *dmub_fw = adev->dm.dmub_fw; 1209 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; 1210 struct abm *abm = adev->dm.dc->res_pool->abm; 1211 struct dc_context *ctx = adev->dm.dc->ctx; 1212 struct dmub_srv_hw_params hw_params; 1213 enum dmub_status status; 1214 const unsigned char *fw_inst_const, *fw_bss_data; 1215 u32 i, fw_inst_const_size, fw_bss_data_size; 1216 bool has_hw_support; 1217 1218 if (!dmub_srv) 1219 /* DMUB isn't supported on the ASIC. */ 1220 return 0; 1221 1222 if (!fb_info) { 1223 drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n"); 1224 return -EINVAL; 1225 } 1226 1227 if (!dmub_fw) { 1228 /* Firmware required for DMUB support. */ 1229 drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n"); 1230 return -EINVAL; 1231 } 1232 1233 /* initialize register offsets for ASICs with runtime initialization available */ 1234 if (dmub_srv->hw_funcs.init_reg_offsets) 1235 dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx); 1236 1237 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); 1238 if (status != DMUB_STATUS_OK) { 1239 drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status); 1240 return -EINVAL; 1241 } 1242 1243 if (!has_hw_support) { 1244 drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n"); 1245 return 0; 1246 } 1247 1248 /* Reset DMCUB if it was previously running - before we overwrite its memory. */ 1249 status = dmub_srv_hw_reset(dmub_srv); 1250 if (status != DMUB_STATUS_OK) 1251 drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status); 1252 1253 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; 1254 1255 fw_inst_const = dmub_fw->data + 1256 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 1257 PSP_HEADER_BYTES; 1258 1259 fw_bss_data = dmub_fw->data + 1260 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 1261 le32_to_cpu(hdr->inst_const_bytes); 1262 1263 /* Copy firmware and bios info into FB memory. 
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dc_context *ctx = adev->dm.dc->ctx;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	u32 i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	/* initialize register offsets for ASICs with runtime initialization available */
	if (dmub_srv->hw_funcs.init_reg_offsets)
		dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	memset(fb_info->fb[DMUB_WINDOW_SHARED_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_SHARED_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->vm_manager.vram_base_offset;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 4):
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(4, 0, 1):
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 6, 0):
		hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
		hw_params.lower_hbr3_phy_ssc = true;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	drm_info(adev_to_drm(adev), "DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	/* Keeping sanity checks off if
	 * DCN31 >= 4.0.59.0
	 * DCN314 >= 8.0.16.0
	 * Otherwise, turn on sanity checks
	 */
	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		if (adev->dm.dmcub_fw_version &&
		    adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
		    adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59))
			adev->dm.dc->debug.sanity_checks = true;
		break;
	case IP_VERSION(3, 1, 4):
		if (adev->dm.dmcub_fw_version &&
		    adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
		    adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16))
			adev->dm.dc->debug.sanity_checks = true;
		break;
	default:
		break;
	}

	return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;
	int r;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		drm_warn(adev_to_drm(adev), "DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		r = dm_dmub_hw_init(adev);
		if (r)
			drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
	}
}
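/*
 * mmhub_read_system_context() - Build DC's physical address space config
 *
 * Translates the GMC view of memory (framebuffer aperture, optional AGP
 * window and GART page table range) into the dc_phy_addr_space_config that
 * DC uses to program the display hub's view of system memory. Note the
 * differing granularities: system aperture addresses are kept in 256KiB
 * (>> 18) units and the AGP window in 16MiB (>> 24) units.
 */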
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	u64 pt_base;
	u32 logical_addr_low;
	u32 logical_addr_high;
	u32 agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	/* AGP aperture is disabled */
	if (agp_bot > agp_top) {
		logical_addr_low = adev->gmc.fb_start >> 18;
		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
				       AMD_APU_IS_RENOIR |
				       AMD_APU_IS_GREEN_SARDINE))
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
			 * workaround that increases the system aperture high address
			 * (add 1) to get rid of the VM fault and hardware hang.
			 */
			logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
		else
			logical_addr_high = adev->gmc.fb_end >> 18;
	} else {
		logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
				       AMD_APU_IS_RENOIR |
				       AMD_APU_IS_GREEN_SARDINE))
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
			 * workaround that increases the system aperture high address
			 * (add 1) to get rid of the VM fault and hardware hang.
			 */
			logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
		else
			logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
	}

	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
						   AMDGPU_GPU_PAGE_SHIFT);
	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
						  AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
						 AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
						AMDGPU_GPU_PAGE_SHIFT);
	page_table_base.high_part = upper_32_bits(pt_base);
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;

}

static void force_connector_state(
	struct amdgpu_dm_connector *aconnector,
	enum drm_connector_force force_state)
{
	struct drm_connector *connector = &aconnector->base;

	mutex_lock(&connector->dev->mode_config.mutex);
	aconnector->base.force = force_state;
	mutex_unlock(&connector->dev->mode_config.mutex);

	mutex_lock(&aconnector->hpd_lock);
	drm_kms_helper_connector_hotplug_event(connector);
	mutex_unlock(&aconnector->hpd_lock);
}
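/*
 * dm_handle_hpd_rx_offload_work() - Deferred handler for HPD RX interrupts
 *
 * Runs from the per-link offload workqueue, outside interrupt context, and
 * services whatever the short-pulse IRQ reported: MST sideband message
 * readiness, DP automated test requests (ACKed via DP_TEST_RESPONSE), or
 * link loss, which triggers retraining through
 * dc_link_dp_handle_link_loss().
 */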
mutex_lock(&aconnector->hpd_lock); 1538 if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) 1539 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); 1540 mutex_unlock(&aconnector->hpd_lock); 1541 1542 if (new_connection_type == dc_connection_none) 1543 goto skip; 1544 1545 if (amdgpu_in_reset(adev)) 1546 goto skip; 1547 1548 if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || 1549 offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { 1550 dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT); 1551 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); 1552 offload_work->offload_wq->is_handling_mst_msg_rdy_event = false; 1553 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); 1554 goto skip; 1555 } 1556 1557 mutex_lock(&adev->dm.dc_lock); 1558 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) { 1559 dc_link_dp_handle_automated_test(dc_link); 1560 1561 if (aconnector->timing_changed) { 1562 /* force connector disconnect and reconnect */ 1563 force_connector_state(aconnector, DRM_FORCE_OFF); 1564 msleep(100); 1565 force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED); 1566 } 1567 1568 test_response.bits.ACK = 1; 1569 1570 core_link_write_dpcd( 1571 dc_link, 1572 DP_TEST_RESPONSE, 1573 &test_response.raw, 1574 sizeof(test_response)); 1575 } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && 1576 dc_link_check_link_loss_status(dc_link, &offload_work->data) && 1577 dc_link_dp_allow_hpd_rx_irq(dc_link)) { 1578 /* offload_work->data is from handle_hpd_rx_irq-> 1579 * schedule_hpd_rx_offload_work.this is defer handle 1580 * for hpd short pulse. upon here, link status may be 1581 * changed, need get latest link status from dpcd 1582 * registers. if link status is good, skip run link 1583 * training again. 
		union hpd_irq_data irq_data;

		memset(&irq_data, 0, sizeof(irq_data));

		/* before dc_link_dp_handle_link_loss, allow new link lost handle
		 * request be added to work queue if link lost at end of dc_link_
		 * dp_handle_link_loss
		 */
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);

		if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
		    dc_link_check_link_loss_status(dc_link, &irq_data))
			dc_link_dp_handle_link_loss(dc_link);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;


	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			drm_err(adev_to_drm(adev), "create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	for (i = 0; i < max_caps; i++) {
		if (hpd_rx_offload_wq[i].wq)
			destroy_workqueue(hpd_rx_offload_wq[i].wq);
	}
	kfree(hpd_rx_offload_wq);
	return NULL;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}
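/*
 * dm_allocate_gpu_mem() - Allocate kernel-pinned GPU memory for DC
 *
 * Creates a kernel BO in GTT or VRAM depending on @type, returns its CPU
 * pointer and reports the GPU address through @addr. Each allocation is
 * tracked on adev->dm.da_list so dm_free_gpu_mem() can later find and
 * release it by CPU pointer. See dm_dmub_get_vbios_bounding_box() below
 * for a typical caller.
 */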
void *
dm_allocate_gpu_mem(
		struct amdgpu_device *adev,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	struct dal_allocation *da;
	u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
		AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
	int ret;

	da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
	if (!da)
		return NULL;

	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				      domain, &da->bo,
				      &da->gpu_addr, &da->cpu_ptr);

	*addr = da->gpu_addr;

	if (ret) {
		kfree(da);
		return NULL;
	}

	/* add da to the list in dm */
	list_add(&da->list, &adev->dm.da_list);

	return da->cpu_ptr;
}

void
dm_free_gpu_mem(
		struct amdgpu_device *adev,
		enum dc_gpu_mem_alloc_type type,
		void *pvMem)
{
	struct dal_allocation *da;

	/* walk the da list in DM */
	list_for_each_entry(da, &adev->dm.da_list, list) {
		if (pvMem == da->cpu_ptr) {
			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
			list_del(&da->list);
			kfree(da);
			break;
		}
	}
}

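/* A minimal usage sketch for the two helpers above: allocate a GART-backed
 * buffer, hand the GPU address to hardware, and use the returned CPU pointer
 * from the kernel. The buffer size and the surrounding error handling are
 * illustrative only, not part of the driver:
 *
 *	long long gpu_addr;
 *	void *cpu_ptr;
 *
 *	cpu_ptr = dm_allocate_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART,
 *				      4096, &gpu_addr);
 *	if (!cpu_ptr)
 *		return -ENOMEM;
 *	memset(cpu_ptr, 0, 4096);		// CPU-side access
 *	// ... program gpu_addr into hardware ...
 *	dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, cpu_ptr);
 *
 * dm_free_gpu_mem() looks the allocation up by its CPU pointer, so callers
 * only need to keep cpu_ptr around in order to release the memory later.
 */
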
static enum dmub_status
dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
				 enum dmub_gpint_command command_code,
				 uint16_t param,
				 uint32_t timeout_us)
{
	union dmub_gpint_data_register reg, test;
	uint32_t i;

	/* Assume that VBIOS DMUB is ready to take commands */

	reg.bits.status = 1;
	reg.bits.command_code = command_code;
	reg.bits.param = param;

	cgs_write_register(adev->dm.cgs_device, 0x34c0 + 0x01f8, reg.all);

	for (i = 0; i < timeout_us; ++i) {
		udelay(1);

		/* Check if our GPINT got acked */
		reg.bits.status = 0;
		test = (union dmub_gpint_data_register)
			cgs_read_register(adev->dm.cgs_device, 0x34c0 + 0x01f8);

		if (test.all == reg.all)
			return DMUB_STATUS_OK;
	}

	return DMUB_STATUS_TIMEOUT;
}

static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
{
	struct dml2_soc_bb *bb;
	long long addr;
	int i = 0;
	uint16_t chunk;
	enum dmub_gpint_command send_addrs[] = {
		DMUB_GPINT__SET_BB_ADDR_WORD0,
		DMUB_GPINT__SET_BB_ADDR_WORD1,
		DMUB_GPINT__SET_BB_ADDR_WORD2,
		DMUB_GPINT__SET_BB_ADDR_WORD3,
	};
	enum dmub_status ret;

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(4, 0, 1):
		break;
	default:
		return NULL;
	}

	bb = dm_allocate_gpu_mem(adev,
				 DC_MEM_ALLOC_TYPE_GART,
				 sizeof(struct dml2_soc_bb),
				 &addr);
	if (!bb)
		return NULL;

	for (i = 0; i < 4; i++) {
		/* Extract 16-bit chunk */
		chunk = ((uint64_t) addr >> (i * 16)) & 0xFFFF;
		/* Send the chunk */
		ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000);
		if (ret != DMUB_STATUS_OK)
			goto free_bb;
	}

	/* Now ask DMUB to copy the bb */
	ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000);
	if (ret != DMUB_STATUS_OK)
		goto free_bb;

	return bb;

free_bb:
	dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb);
	return NULL;
}

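/* Each GPINT register carries only a 16-bit parameter, so the 64-bit GPU
 * address of the bounding box is sent as four words, least significant
 * first. As a worked example with an arbitrary address, 0x0000000812345678:
 *
 *	chunk = (addr >> (i * 16)) & 0xFFFF;
 *
 *	i = 0: 0x5678 -> DMUB_GPINT__SET_BB_ADDR_WORD0
 *	i = 1: 0x1234 -> DMUB_GPINT__SET_BB_ADDR_WORD1
 *	i = 2: 0x0008 -> DMUB_GPINT__SET_BB_ADDR_WORD2
 *	i = 3: 0x0000 -> DMUB_GPINT__SET_BB_ADDR_WORD3
 *
 * Only after all four words land does DMUB_GPINT__BB_COPY tell the firmware
 * to copy its bounding box into the buffer at that address.
 */
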
static enum dmub_ips_disable_type dm_get_default_ips_mode(
	struct amdgpu_device *adev)
{
	enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE;

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 5, 1):
		ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
		break;
	default:
		/* ASICs older than DCN35 do not have IPS support */
		if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0))
			ret = DMUB_IPS_DISABLE_ALL;
		break;
	}

	return ret;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
	struct dc_callback_init init_params;
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
	memset(&init_params, 0, sizeof(init_params));

	mutex_init(&adev->dm.dpia_aux_lock);
	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		drm_err(adev_to_drm(adev), "amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	/* cgs_device was created in dm_sw_init() */
	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(2, 1, 0):
		switch (adev->dm.dmcub_fw_version) {
		case 0: /* development */
		case 0x1: /* linux-firmware.git hash 6d9f399 */
		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
			init_data.flags.disable_dmcu = false;
			break;
		default:
			init_data.flags.disable_dmcu = true;
		}
		break;
	case IP_VERSION(2, 0, 3):
		init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	/* APUs support S/G display by default, except:
	 * ASICs before Carrizo,
	 * RAVEN1 (users reported stability issues)
	 */

	if (adev->asic_type < CHIP_CARRIZO) {
		init_data.flags.gpu_vm_support = false;
	} else if (adev->asic_type == CHIP_RAVEN) {
		if (adev->apu_flags & AMD_APU_IS_RAVEN)
			init_data.flags.gpu_vm_support = false;
		else
			init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
	} else {
		if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3))
			init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1);
		else
			init_data.flags.gpu_vm_support =
				(amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
	}

	adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;

	init_data.flags.seamless_boot_edp_requested = false;

	if (amdgpu_device_seamless_boot_supported(adev)) {
		init_data.flags.seamless_boot_edp_requested = true;
		init_data.flags.allow_seamless_boot_optimization = true;
		drm_dbg(adev->dm.ddev, "Seamless boot requested\n");
	}

	init_data.flags.enable_mipi_converter_optimization = true;

	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
	init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];

	if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
		init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
	else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC)
		init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC;
	else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC)
		init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
	else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
		init_data.flags.disable_ips = DMUB_IPS_ENABLE;
	else
		init_data.flags.disable_ips = dm_get_default_ips_mode(adev);

	init_data.flags.disable_ips_in_vpb = 0;

	/* Enable DWB for tested platforms only */
	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
		init_data.num_virtual_links = 1;

	retrieve_dmi_info(&adev->dm);
	if (adev->dm.edp0_on_dp1_quirk)
		init_data.flags.support_edp0_on_dp1 = true;

	if (adev->dm.bb_from_dmub)
		init_data.bb_from_dmub = adev->dm.bb_from_dmub;
	else
		init_data.bb_from_dmub = NULL;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		drm_info(adev_to_drm(adev), "Display Core v%s initialized on %s\n", DC_VER,
			 dce_version_to_string(adev->dm.dc->ctx->dce_version));
	} else {
		drm_info(adev_to_drm(adev), "Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
		adev->dm.dc->debug.force_subvp_mclk_switch = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP_FAMS) {
		adev->dm.dc->debug.force_disable_subvp = true;
		adev->dm.dc->debug.fams2_config.bits.enable = false;
	}

	if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
		adev->dm.dc->debug.using_dml2 = true;
		adev->dm.dc->debug.using_dml21 = true;
	}

	if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE)
		adev->dm.dc->debug.hdcp_lc_force_fw_enable = true;

	if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK)
		adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true;

	if (amdgpu_dc_debug_mask & DC_SKIP_DETECTION_LT)
		adev->dm.dc->debug.skip_detection_link_training = true;

	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;

	/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
	adev->dm.dc->debug.ignore_cable_id = true;

	if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
		drm_info(adev_to_drm(adev), "DP-HDMI FRL PCON supported\n");

	r = dm_dmub_hw_init(adev);
	if (r) {
		drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
	if (!adev->dm.hpd_rx_offload_wq) {
		drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd rx offload workqueue.\n");
		goto error;
	}

	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		drm_err(adev_to_drm(adev),
			"amdgpu: failed to initialize freesync_module.\n");
	} else
		drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
			       adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			drm_err(adev_to_drm(adev), "amdgpu: failed to initialize vblank_workqueue.\n");
	}

	if (adev->dm.dc->caps.ips_support &&
	    adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL)
		adev->dm.idle_workqueue = idle_create_workqueue(adev);

	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			drm_err(adev_to_drm(adev), "amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			drm_info(adev_to_drm(adev), "amdgpu: failed to allocate adev->dm.dmub_notify");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
						   dmub_aux_setconfig_callback, false)) {
			drm_err(adev_to_drm(adev), "amdgpu: failed to register dmub aux callback");
			goto error;
		}

		for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++)
			init_completion(&adev->dm.fused_io[i].replied);

		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO,
						   dmub_aux_fused_io_callback, false)) {
			drm_err(adev_to_drm(adev), "amdgpu: failed to register dmub fused io callback");
			goto error;
		}
		/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
		 * It is expected that DMUB will resend any pending notifications at this point. Note
		 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
		 * align with the legacy interface initialization sequence. Connection status will be
		 * proactively detected once in amdgpu_dm_initialize_drm_device().
		 */
		dc_enable_dmub_outbox(adev->dm.dc);

		/* DPIA trace goes to dmesg logs only if outbox is enabled */
		if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
			dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		drm_err(adev_to_drm(adev),
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO: use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		drm_err(adev_to_drm(adev),
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	amdgpu_dm_crtc_secure_display_create_contexts(adev);
	if (!adev->dm.secure_display_ctx.crtc_ctx)
		drm_err(adev_to_drm(adev), "amdgpu: failed to initialize secure display contexts.\n");

	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
		adev->dm.secure_display_ctx.support_mul_roi = true;

#endif

	drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;
	}

	if (adev->dm.idle_workqueue) {
		if (adev->dm.idle_workqueue->running) {
			adev->dm.idle_workqueue->enable = false;
			flush_work(&adev->dm.idle_workqueue->work);
		}

		kfree(adev->dm.idle_workqueue);
		adev->dm.idle_workqueue = NULL;
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.secure_display_ctx.crtc_ctx) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (adev->dm.secure_display_ctx.crtc_ctx[i].crtc) {
				flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].notify_ta_work);
				flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].forward_roi_work);
			}
		}
		kfree(adev->dm.secure_display_ctx.crtc_ctx);
		adev->dm.secure_display_ctx.crtc_ctx = NULL;
	}
#endif
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc) {
		dc_deinit_callbacks(adev->dm.dc);
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		if (dc_enable_dmub_notifications(adev->dm.dc)) {
			kfree(adev->dm.dmub_notify);
			adev->dm.dmub_notify = NULL;
			destroy_workqueue(adev->dm.delayed_hpd_wq);
			adev->dm.delayed_hpd_wq = NULL;
		}
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
			if (adev->dm.hpd_rx_offload_wq[i].wq) {
				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
			}
		}

		kfree(adev->dm.hpd_rx_offload_wq);
		adev->dm.hpd_rx_offload_wq = NULL;
	}

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
	mutex_destroy(&adev->dm.dpia_aux_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
		case IP_VERSION(3, 5, 0):
		case IP_VERSION(3, 5, 1):
		case IP_VERSION(3, 6, 0):
		case IP_VERSION(4, 0, 1):
			return 0;
		default:
			break;
		}
		drm_err(adev_to_drm(adev), "Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, AMDGPU_UCODE_REQUIRED,
				 "%s", fw_name_dmcu);
	if (r == -ENODEV) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		drm_err(adev_to_drm(adev), "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		amdgpu_ucode_release(&adev->dm.fw_dmcu);
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_memory_params memory_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
		DMUB_WINDOW_MEMORY_TYPE_FB,	//DMUB_WINDOW_0_INST_CONST
		DMUB_WINDOW_MEMORY_TYPE_FB,	//DMUB_WINDOW_1_STACK
		DMUB_WINDOW_MEMORY_TYPE_FB,	//DMUB_WINDOW_2_BSS_DATA
		DMUB_WINDOW_MEMORY_TYPE_FB,	//DMUB_WINDOW_3_VBIOS
		DMUB_WINDOW_MEMORY_TYPE_FB,	//DMUB_WINDOW_4_MAILBOX
		DMUB_WINDOW_MEMORY_TYPE_FB,	//DMUB_WINDOW_5_TRACEBUFF
		DMUB_WINDOW_MEMORY_TYPE_FB,	//DMUB_WINDOW_6_FW_STATE
		DMUB_WINDOW_MEMORY_TYPE_FB,	//DMUB_WINDOW_7_SCRATCH_MEM
		DMUB_WINDOW_MEMORY_TYPE_FB,	//DMUB_WINDOW_SHARED_STATE
	};
	int r;

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(2, 1, 0):
		dmub_asic = DMUB_ASIC_DCN21;
		break;
	case IP_VERSION(3, 0, 0):
		dmub_asic = DMUB_ASIC_DCN30;
		break;
	case IP_VERSION(3, 0, 1):
		dmub_asic = DMUB_ASIC_DCN301;
		break;
	case IP_VERSION(3, 0, 2):
		dmub_asic = DMUB_ASIC_DCN302;
		break;
	case IP_VERSION(3, 0, 3):
		dmub_asic = DMUB_ASIC_DCN303;
		break;
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
		break;
	case IP_VERSION(3, 1, 4):
		dmub_asic = DMUB_ASIC_DCN314;
		break;
	case IP_VERSION(3, 1, 5):
		dmub_asic = DMUB_ASIC_DCN315;
		break;
	case IP_VERSION(3, 1, 6):
		dmub_asic = DMUB_ASIC_DCN316;
		break;
	case IP_VERSION(3, 2, 0):
		dmub_asic = DMUB_ASIC_DCN32;
		break;
	case IP_VERSION(3, 2, 1):
		dmub_asic = DMUB_ASIC_DCN321;
		break;
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 5, 1):
		dmub_asic = DMUB_ASIC_DCN35;
		break;
	case IP_VERSION(3, 6, 0):
		dmub_asic = DMUB_ASIC_DCN36;
		break;
	case IP_VERSION(4, 0, 1):
		dmub_asic = DMUB_ASIC_DCN401;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		drm_info(adev_to_drm(adev), "Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		drm_err(adev_to_drm(adev), "Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		drm_err(adev_to_drm(adev), "Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;
	region_params.window_memory_type = window_memory_type;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		drm_err(adev_to_drm(adev), "Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&memory_params, 0, sizeof(memory_params));
	memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
	memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
	memory_params.region_info = &region_info;
	memory_params.window_memory_type = window_memory_type;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		drm_err(adev_to_drm(adev),
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		drm_err(adev_to_drm(adev), "Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	adev->dm.bb_from_dmub = dm_dmub_get_vbios_bounding_box(adev);

	return 0;
}

static int dm_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		drm_err(adev_to_drm(adev), "amdgpu: failed to create cgs device.\n");
		return -EINVAL;
	}

	/* Moved from dm init since we need to use allocations for storing bounding box data */
	INIT_LIST_HEAD(&adev->dm.da_list);

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct dal_allocation *da;

	list_for_each_entry(da, &adev->dm.da_list, list) {
		if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) {
			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
			list_del(&da->list);
			kfree(da);
			adev->dm.bb_from_dmub = NULL;
			break;
		}
	}

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		kfree(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	amdgpu_ucode_release(&adev->dm.dmub_fw);
	amdgpu_ucode_release(&adev->dm.fw_dmcu);

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			drm_dbg_kms(dev, "DM_MST: starting TM on aconnector: %p [id: %d]\n",
				    aconnector,
				    aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				drm_err(dev, "DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
								     aconnector->dc_link);
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_override = false;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Minimum backlight level after ABM reduction; don't allow it to fall
	 * below 1%: 0xFFFF * 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;
	/* When ABM is implemented in dmcub, the dmcu object will be NULL.
	 * ABM 2.4 and up are implemented in dmcub.
	 */
	if (dmcu) {
		if (!dmcu_load_iram(dmcu, params))
			return -EINVAL;
	} else if (adev->dm.dc->ctx->dmub_srv) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		int edp_num;

		dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
		for (i = 0; i < edp_num; i++) {
			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
				return -EINVAL;
		}
	}

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 buf[UUID_SIZE];
	guid_t guid;
	int ret;

	mutex_lock(&mgr->lock);
	if (!mgr->mst_primary)
		goto out_fail;

	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
		goto out_fail;
	}

	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
				 DP_MST_EN |
				 DP_UP_REQ_EN |
				 DP_UPSTREAM_IS_SRC);
	if (ret < 0) {
		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
		goto out_fail;
	}

	/* Some hubs forget their guids after they resume */
	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
	if (ret != sizeof(buf)) {
		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
		goto out_fail;
	}

	import_guid(&guid, buf);

	if (guid_is_null(&guid)) {
		guid_gen(&guid);
		export_guid(buf, &guid);

		ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, buf, sizeof(buf));

		if (ret != sizeof(buf)) {
			drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
			goto out_fail;
		}
	}

	guid_copy(&mgr->mst_primary->guid, &guid);

out_fail:
	mutex_unlock(&mgr->lock);
}

void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector)
{
	struct cec_notifier *n = aconnector->notifier;

	if (!n)
		return;

	cec_notifier_phys_addr_invalidate(n);
}

void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct cec_notifier *n = aconnector->notifier;

	if (!n)
		return;

	cec_notifier_set_phys_addr(n,
				   connector->display_info.source_physical_address);
}

static void s3_handle_hdmi_cec(struct drm_device *ddev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(ddev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (suspend)
			hdmi_cec_unset_edid(aconnector);
		else
			hdmi_cec_set_edid(aconnector);
	}
	drm_connector_list_iter_end(&conn_iter);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_root)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			/* If extended timeout is supported in hardware,
			 * default to the LTTPR timeout (3.2ms) first as a W/A
			 * for the DP link layer CTS 4.2.1.1 regression
			 * introduced by the CTS specs requirement update.
			 */
			try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
			if (!dp_is_lttpr_present(aconnector->dc_link))
				try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);

			/* TODO: move resume_mst_branch_status() into drm mst resume again
			 * once topology probing work is pulled out from mst resume into mst
			 * resume 2nd step. mst resume 2nd step should be called after old
			 * state getting restored (i.e. drm_atomic_helper_resume()).
			 */
			resume_mst_branch_status(mgr);
		}
	}
	drm_connector_list_iter_end(&iter);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	int ret = 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed. The
	 * settings should be passed to smu during boot up and on resume from
	 * s3.
	 * boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, then calls the pplib functions
	 * below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the dcn watermark clock settings are also fixed values,
	 * but dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(2, 0, 2):
	case IP_VERSION(2, 0, 0):
		break;
	default:
		return 0;
	}

	ret = amdgpu_dpm_write_watermarks_table(adev);
	if (ret) {
		drm_err(adev_to_drm(adev), "Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_i2c_adapter *oem_i2c;
	struct ddc_service *oem_ddc_service;
	int r;

	oem_ddc_service = dc_get_oem_i2c_device(adev->dm.dc);
	if (oem_ddc_service) {
		oem_i2c = create_i2c(oem_ddc_service, true);
		if (!oem_i2c) {
			drm_info(adev_to_drm(adev), "Failed to create oem i2c adapter data\n");
			return -ENOMEM;
		}

		r = i2c_add_adapter(&oem_i2c->base);
		if (r) {
			drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
			kfree(oem_i2c);
			return r;
		}
		dm->oem_i2c = oem_i2c;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	/* Create DAL display manager */
	r = amdgpu_dm_init(adev);
	if (r)
		return r;
	amdgpu_dm_hpd_init(adev);

	r = dm_oem_i2c_hw_init(adev);
	if (r)
		drm_info(adev_to_drm(adev), "Failed to add OEM i2c bus\n");

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	kfree(adev->dm.oem_i2c);

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			if (rc)
				drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
					rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
			} else
				rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);

			if (rc)
				drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");

			irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
			/* During gpu-reset we disable and then enable vblank irq, so
			 * don't use amdgpu_irq_get/put() to avoid refcount change.
			 */
			if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
				drm_warn(adev_to_drm(adev), "Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
		}
	}
}

DEFINE_FREE(state_release, struct dc_state *, if (_T) dc_state_release(_T))

static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context __free(state_release) = NULL;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;
	struct dc_commit_streams_params params = {};

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_state_create_current_copy(dc);
	if (context == NULL)
		return DC_ERROR_UNEXPECTED;

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		enum dc_status res;

		if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context))
			return DC_FAIL_DETACH_SURFACES;

		res = dc_state_remove_stream(dc, context, del_streams[i]);
		if (res != DC_OK)
			return res;
	}

	params.streams = context->streams;
	params.stream_count = context->stream_count;

	return dc_commit_streams(dc, &params);
}

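/* amdgpu_dm_commit_zero_streams() leans on the scope-based cleanup helpers
 * from <linux/cleanup.h>: DEFINE_FREE(state_release, ...) above binds a
 * cleanup expression to the state_release name, and the __free(state_release)
 * annotation runs dc_state_release() on 'context' automatically at every
 * return, which is why none of the early error paths release it by hand. A
 * minimal sketch of the same pattern, with an invented 'thing' type used
 * purely for illustration:
 *
 *	DEFINE_FREE(thing_free, struct thing *, if (_T) thing_release(_T))
 *
 *	static int use_thing(void)
 *	{
 *		struct thing *t __free(thing_free) = thing_create();
 *
 *		if (!t)
 *			return -ENOMEM;
 *		if (!thing_valid(t))
 *			return -EINVAL;	// t released automatically
 *		return 0;		// released here too
 *	}
 */
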
static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
{
	int i;

	if (dm->hpd_rx_offload_wq) {
		for (i = 0; i < dm->dc->caps.max_links; i++)
			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
	}
}

static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_in_reset(adev))
		return 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
	if (IS_ERR(adev->dm.cached_state))
		return PTR_ERR(adev->dm.cached_state);

	return 0;
}

static int dm_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_display_manager *dm = &adev->dm;

	if (amdgpu_in_reset(adev)) {
		enum dc_status res;

		mutex_lock(&dm->dc_lock);

		dc_allow_idle_optimizations(adev->dm.dc, false);

		dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);

		if (dm->cached_dc_state)
			dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		res = amdgpu_dm_commit_zero_streams(dm->dc);
		if (res != DC_OK) {
			drm_err(adev_to_drm(adev), "Failed to commit zero streams: %d\n", res);
			return -EINVAL;
		}

		amdgpu_dm_irq_suspend(adev);

		hpd_rx_irq_work_suspend(dm);

		return 0;
	}

	if (!adev->dm.cached_state) {
		adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
		if (IS_ERR(adev->dm.cached_state))
			return PTR_ERR(adev->dm.cached_state);
	}

	s3_handle_hdmi_cec(adev_to_drm(adev), true);

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	hpd_rx_irq_work_suspend(dm);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	if (dm->dc->caps.ips_support && adev->in_s0ix)
		dc_allow_idle_optimizations(dm->dc, true);

	dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

struct drm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	u32 i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return connector;
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct drm_device *dev = adev_to_drm(dc_ctx->driver_context);
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink)
		dc_sink_release(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		drm_err(dev, "Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		drm_err(dev, "Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		drm_err(dev, "Failed to read EDID\n");
}

static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle __free(kfree);
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		drm_err(dm->ddev, "Failed to allocate update bundle\n");
		return;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}

		update_planes_and_stream_adapter(dm->dc,
						 UPDATE_TYPE_FULL,
						 dc_state->stream_status[k].plane_count,
						 dc_state->streams[k],
						 &bundle->stream_update,
						 bundle->surface_updates);
	}
}

static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev,
					    struct dc_sink *sink)
{
	struct dc_panel_patch *ppatch = NULL;

	if (!sink)
		return;

	ppatch = &sink->edid_caps.panel_patch;
	if (ppatch->wait_after_dpcd_poweroff_ms) {
		msleep(ppatch->wait_after_dpcd_poweroff_ms);
		drm_dbg_driver(adev_to_drm(adev),
			       "%s: adding a %ds delay as w/a for panel\n",
			       __func__,
			       ppatch->wait_after_dpcd_poweroff_ms / 1000);
	}
}

static int dm_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;
	struct dc_commit_streams_params commit_params = {};

	if (dm->dc->caps.ips_support) {
		dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
	}

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		/*
		 * The dc->current_state is backed up into dm->cached_dc_state
		 * before we commit 0 streams.
		 *
		 * DC will clear link encoder assignments on the real state
		 * but the changes won't propagate over to the copy we made
		 * before the 0 streams commit.
		 *
		 * DC expects that link encoder assignments are *not* valid
		 * when committing a state, so as a workaround we can copy
		 * off of the current state.
		 *
		 * We lose the previous assignments, but we had already
		 * committed 0 streams anyway.
		 */
		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);

		r = dm_dmub_hw_init(adev);
		if (r)
			drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);

		dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
			amdgpu_dm_outbox_init(adev);
			dc_enable_dmub_outbox(adev->dm.dc);
		}

		commit_params.streams = dc_state->streams;
		commit_params.stream_count = dc_state->stream_count;
		dc_exit_ips_for_hw_access(dm->dc);
		WARN_ON(!dc_commit_streams(dm->dc, &commit_params));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_state_release(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		/* set the backlight after a reset */
		for (i = 0; i < dm->num_of_edps; i++) {
			if (dm->backlight_dev[i])
				amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
		}

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_state_release(dm_state->context);
	dm_state->context = dc_state_create(dm->dc, NULL);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */

	/* Before powering on DC we need to re-initialize DMUB. */
	dm_dmub_hw_resume(adev);

	/* Re-enable outbox interrupts for DPIA. */
	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
		amdgpu_dm_outbox_init(adev);
		dc_enable_dmub_outbox(adev->dm.dc);
	}

	/* power on hardware */
	dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ; this should be done before set mode, as
	 * short pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	s3_handle_hdmi_cec(ddev, false);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		bool ret;

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->dc_link)
			continue;

		/*
		 * Skip connectors that were already created for end sinks
		 * while traversing an MST topology.
		 */
		if (aconnector->mst_root)
			continue;

		guard(mutex)(&aconnector->hpd_lock);
		if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
			drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(aconnector->dc_link);
		} else {
			guard(mutex)(&dm->dc_lock);
			dc_exit_ips_for_hw_access(dm->dc);
			ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
			if (ret) {
				/* w/a delay for certain panels */
				apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
			}
		}

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		new_crtc_state->active_changed = true;
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		reset_freesync_config_for_crtc(dm_new_crtc_state);
	}

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
		dm_new_crtc_state->base.color_mgmt_changed = true;
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	/* Do mst topology probing after resuming cached state */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_root)
			continue;

		drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
	}
	drm_connector_list_iter_end(&iter);

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	drm_kms_helper_hotplug_event(ddev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

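/* For reference, registration follows the usual amdgpu IP-block pattern: the
 * base driver adds dm_ip_block (defined below) to its per-device list, and
 * the amd_ip_funcs hooks are then invoked at the corresponding lifecycle
 * points. A minimal sketch of the registration call as it would appear in
 * the base driver (the surrounding IP-discovery logic is omitted):
 *
 *	r = amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *	if (r)
 *		return r;
 */
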
3528 */ 3529 3530 static const struct amd_ip_funcs amdgpu_dm_funcs = { 3531 .name = "dm", 3532 .early_init = dm_early_init, 3533 .late_init = dm_late_init, 3534 .sw_init = dm_sw_init, 3535 .sw_fini = dm_sw_fini, 3536 .early_fini = amdgpu_dm_early_fini, 3537 .hw_init = dm_hw_init, 3538 .hw_fini = dm_hw_fini, 3539 .prepare_suspend = dm_prepare_suspend, 3540 .suspend = dm_suspend, 3541 .resume = dm_resume, 3542 .is_idle = dm_is_idle, 3543 .wait_for_idle = dm_wait_for_idle, 3544 .check_soft_reset = dm_check_soft_reset, 3545 .soft_reset = dm_soft_reset, 3546 .set_clockgating_state = dm_set_clockgating_state, 3547 .set_powergating_state = dm_set_powergating_state, 3548 }; 3549 3550 const struct amdgpu_ip_block_version dm_ip_block = { 3551 .type = AMD_IP_BLOCK_TYPE_DCE, 3552 .major = 1, 3553 .minor = 0, 3554 .rev = 0, 3555 .funcs = &amdgpu_dm_funcs, 3556 }; 3557 3558 3559 /** 3560 * DOC: atomic 3561 * 3562 * *WIP* 3563 */ 3564 3565 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 3566 .fb_create = amdgpu_display_user_framebuffer_create, 3567 .get_format_info = amdgpu_dm_plane_get_format_info, 3568 .atomic_check = amdgpu_dm_atomic_check, 3569 .atomic_commit = drm_atomic_helper_commit, 3570 }; 3571 3572 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { 3573 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, 3574 .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, 3575 }; 3576 3577 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) 3578 { 3579 struct amdgpu_dm_backlight_caps *caps; 3580 struct drm_connector *conn_base; 3581 struct amdgpu_device *adev; 3582 struct drm_luminance_range_info *luminance_range; 3583 int min_input_signal_override; 3584 3585 if (aconnector->bl_idx == -1 || 3586 aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP) 3587 return; 3588 3589 conn_base = &aconnector->base; 3590 adev = drm_to_adev(conn_base->dev); 3591 3592 caps = &adev->dm.backlight_caps[aconnector->bl_idx]; 3593 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; 3594 caps->aux_support = false; 3595 3596 if (caps->ext_caps->bits.oled == 1 3597 /* 3598 * || 3599 * caps->ext_caps->bits.sdr_aux_backlight_control == 1 || 3600 * caps->ext_caps->bits.hdr_aux_backlight_control == 1 3601 */) 3602 caps->aux_support = true; 3603 3604 if (amdgpu_backlight == 0) 3605 caps->aux_support = false; 3606 else if (amdgpu_backlight == 1) 3607 caps->aux_support = true; 3608 if (caps->aux_support) 3609 aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX; 3610 3611 luminance_range = &conn_base->display_info.luminance_range; 3612 3613 if (luminance_range->max_luminance) { 3614 caps->aux_min_input_signal = luminance_range->min_luminance; 3615 caps->aux_max_input_signal = luminance_range->max_luminance; 3616 } else { 3617 caps->aux_min_input_signal = 0; 3618 caps->aux_max_input_signal = 512; 3619 } 3620 3621 min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid); 3622 if (min_input_signal_override >= 0) 3623 caps->min_input_signal = min_input_signal_override; 3624 } 3625 3626 DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T)) 3627 3628 void amdgpu_dm_update_connector_after_detect( 3629 struct amdgpu_dm_connector *aconnector) 3630 { 3631 struct drm_connector *connector = &aconnector->base; 3632 struct dc_sink *sink __free(sink_release) = NULL; 3633 struct drm_device *dev = connector->dev; 3634 3635 /* MST handled by drm_mst framework */ 3636 if (aconnector->mst_mgr.mst_state == 
true) 3637 return; 3638 3639 sink = aconnector->dc_link->local_sink; 3640 if (sink) 3641 dc_sink_retain(sink); 3642 3643 /* 3644 * An EDID-managed connector gets its first update only in the mode_valid 3645 * hook; the sink is then set to either the fake or the physical sink, 3646 * depending on link status. Skip if this was already done during boot. 3647 */ 3648 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED 3649 && aconnector->dc_em_sink) { 3650 3651 /* 3652 * For headless S3 resume, use dc_em_sink to fake a stream, 3653 * because connector->sink is set to NULL on resume. 3654 */ 3655 guard(mutex)(&dev->mode_config.mutex); 3656 3657 if (sink) { 3658 if (aconnector->dc_sink) { 3659 amdgpu_dm_update_freesync_caps(connector, NULL); 3660 /* 3661 * The retain and release below bump the sink refcount: the 3662 * link no longer points to the sink after disconnect, so the 3663 * next CRTC-to-connector reshuffle by the UMD would otherwise 3664 * trigger an unwanted dc_sink release. 3665 */ 3666 dc_sink_release(aconnector->dc_sink); 3667 } 3668 aconnector->dc_sink = sink; 3669 dc_sink_retain(aconnector->dc_sink); 3670 amdgpu_dm_update_freesync_caps(connector, 3671 aconnector->drm_edid); 3672 } else { 3673 amdgpu_dm_update_freesync_caps(connector, NULL); 3674 if (!aconnector->dc_sink) { 3675 aconnector->dc_sink = aconnector->dc_em_sink; 3676 dc_sink_retain(aconnector->dc_sink); 3677 } 3678 } 3679 3680 return; 3681 } 3682 3683 /* 3684 * TODO: temporary guard until a proper fix is found; if this sink is 3685 * an MST sink, we should not do anything. 3686 */ 3687 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 3688 return; 3689 3690 if (aconnector->dc_sink == sink) { 3691 /* 3692 * We got a DP short pulse (Link Loss, DP CTS, etc...). 3693 * Do nothing. 3694 */ 3695 drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n", 3696 aconnector->connector_id); 3697 return; 3698 } 3699 3700 drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", 3701 aconnector->connector_id, aconnector->dc_sink, sink); 3702 3703 guard(mutex)(&dev->mode_config.mutex); 3704 3705 /* 3706 * 1. Update status of the drm connector 3707 * 2. Send an event and let userspace tell us what to do 3708 */ 3709 if (sink) { 3710 /* 3711 * TODO: check if we still need the S3 mode update workaround. 3712 * If yes, put it here.
3713 */ 3714 if (aconnector->dc_sink) { 3715 amdgpu_dm_update_freesync_caps(connector, NULL); 3716 dc_sink_release(aconnector->dc_sink); 3717 } 3718 3719 aconnector->dc_sink = sink; 3720 dc_sink_retain(aconnector->dc_sink); 3721 if (sink->dc_edid.length == 0) { 3722 aconnector->drm_edid = NULL; 3723 hdmi_cec_unset_edid(aconnector); 3724 if (aconnector->dc_link->aux_mode) { 3725 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 3726 } 3727 } else { 3728 const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid; 3729 3730 aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length); 3731 drm_edid_connector_update(connector, aconnector->drm_edid); 3732 3733 hdmi_cec_set_edid(aconnector); 3734 if (aconnector->dc_link->aux_mode) 3735 drm_dp_cec_attach(&aconnector->dm_dp_aux.aux, 3736 connector->display_info.source_physical_address); 3737 } 3738 3739 if (!aconnector->timing_requested) { 3740 aconnector->timing_requested = 3741 kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); 3742 if (!aconnector->timing_requested) 3743 drm_err(dev, 3744 "failed to create aconnector->requested_timing\n"); 3745 } 3746 3747 amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid); 3748 update_connector_ext_caps(aconnector); 3749 } else { 3750 hdmi_cec_unset_edid(aconnector); 3751 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 3752 amdgpu_dm_update_freesync_caps(connector, NULL); 3753 aconnector->num_modes = 0; 3754 dc_sink_release(aconnector->dc_sink); 3755 aconnector->dc_sink = NULL; 3756 drm_edid_free(aconnector->drm_edid); 3757 aconnector->drm_edid = NULL; 3758 kfree(aconnector->timing_requested); 3759 aconnector->timing_requested = NULL; 3760 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ 3761 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 3762 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 3763 } 3764 3765 update_subconnector_property(aconnector); 3766 } 3767 3768 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) 3769 { 3770 struct drm_connector *connector = &aconnector->base; 3771 struct drm_device *dev = connector->dev; 3772 enum dc_connection_type new_connection_type = dc_connection_none; 3773 struct amdgpu_device *adev = drm_to_adev(dev); 3774 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 3775 struct dc *dc = aconnector->dc_link->ctx->dc; 3776 bool ret = false; 3777 3778 if (adev->dm.disable_hpd_irq) 3779 return; 3780 3781 /* 3782 * In case of failure or MST no need to update connector status or notify the OS 3783 * since (for MST case) MST does this in its own context. 
3784 */ 3785 guard(mutex)(&aconnector->hpd_lock); 3786 3787 if (adev->dm.hdcp_workqueue) { 3788 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 3789 dm_con_state->update_hdcp = true; 3790 } 3791 if (aconnector->fake_enable) 3792 aconnector->fake_enable = false; 3793 3794 aconnector->timing_changed = false; 3795 3796 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 3797 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); 3798 3799 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3800 emulated_link_detect(aconnector->dc_link); 3801 3802 drm_modeset_lock_all(dev); 3803 dm_restore_drm_connector_state(dev, connector); 3804 drm_modeset_unlock_all(dev); 3805 3806 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3807 drm_kms_helper_connector_hotplug_event(connector); 3808 } else { 3809 scoped_guard(mutex, &adev->dm.dc_lock) { 3810 dc_exit_ips_for_hw_access(dc); 3811 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 3812 } 3813 if (ret) { 3814 /* w/a delay for certain panels */ 3815 apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink); 3816 amdgpu_dm_update_connector_after_detect(aconnector); 3817 3818 drm_modeset_lock_all(dev); 3819 dm_restore_drm_connector_state(dev, connector); 3820 drm_modeset_unlock_all(dev); 3821 3822 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3823 drm_kms_helper_connector_hotplug_event(connector); 3824 } 3825 } 3826 } 3827 3828 static void handle_hpd_irq(void *param) 3829 { 3830 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3831 3832 handle_hpd_irq_helper(aconnector); 3833 3834 } 3835 3836 static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq, 3837 union hpd_irq_data hpd_irq_data) 3838 { 3839 struct hpd_rx_irq_offload_work *offload_work = 3840 kzalloc(sizeof(*offload_work), GFP_KERNEL); 3841 3842 if (!offload_work) { 3843 drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n"); 3844 return; 3845 } 3846 3847 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); 3848 offload_work->data = hpd_irq_data; 3849 offload_work->offload_wq = offload_wq; 3850 offload_work->adev = adev; 3851 3852 queue_work(offload_wq->wq, &offload_work->work); 3853 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); 3854 } 3855 3856 static void handle_hpd_rx_irq(void *param) 3857 { 3858 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3859 struct drm_connector *connector = &aconnector->base; 3860 struct drm_device *dev = connector->dev; 3861 struct dc_link *dc_link = aconnector->dc_link; 3862 bool is_mst_root_connector = aconnector->mst_mgr.mst_state; 3863 bool result = false; 3864 enum dc_connection_type new_connection_type = dc_connection_none; 3865 struct amdgpu_device *adev = drm_to_adev(dev); 3866 union hpd_irq_data hpd_irq_data; 3867 bool link_loss = false; 3868 bool has_left_work = false; 3869 int idx = dc_link->link_index; 3870 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; 3871 struct dc *dc = aconnector->dc_link->ctx->dc; 3872 3873 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); 3874 3875 if (adev->dm.disable_hpd_irq) 3876 return; 3877 3878 /* 3879 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio 3880 * conflict, after implement i2c helper, this mutex should be 3881 * retired. 
3882 */ 3883 mutex_lock(&aconnector->hpd_lock); 3884 3885 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, 3886 &link_loss, true, &has_left_work); 3887 3888 if (!has_left_work) 3889 goto out; 3890 3891 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { 3892 schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); 3893 goto out; 3894 } 3895 3896 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { 3897 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || 3898 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { 3899 bool skip = false; 3900 3901 /* 3902 * DOWN_REP_MSG_RDY is also handled by polling method 3903 * mgr->cbs->poll_hpd_irq() 3904 */ 3905 spin_lock(&offload_wq->offload_lock); 3906 skip = offload_wq->is_handling_mst_msg_rdy_event; 3907 3908 if (!skip) 3909 offload_wq->is_handling_mst_msg_rdy_event = true; 3910 3911 spin_unlock(&offload_wq->offload_lock); 3912 3913 if (!skip) 3914 schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); 3915 3916 goto out; 3917 } 3918 3919 if (link_loss) { 3920 bool skip = false; 3921 3922 spin_lock(&offload_wq->offload_lock); 3923 skip = offload_wq->is_handling_link_loss; 3924 3925 if (!skip) 3926 offload_wq->is_handling_link_loss = true; 3927 3928 spin_unlock(&offload_wq->offload_lock); 3929 3930 if (!skip) 3931 schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); 3932 3933 goto out; 3934 } 3935 } 3936 3937 out: 3938 if (result && !is_mst_root_connector) { 3939 /* Downstream Port status changed. */ 3940 if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) 3941 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); 3942 3943 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3944 emulated_link_detect(dc_link); 3945 3946 if (aconnector->fake_enable) 3947 aconnector->fake_enable = false; 3948 3949 amdgpu_dm_update_connector_after_detect(aconnector); 3950 3951 3952 drm_modeset_lock_all(dev); 3953 dm_restore_drm_connector_state(dev, connector); 3954 drm_modeset_unlock_all(dev); 3955 3956 drm_kms_helper_connector_hotplug_event(connector); 3957 } else { 3958 bool ret = false; 3959 3960 mutex_lock(&adev->dm.dc_lock); 3961 dc_exit_ips_for_hw_access(dc); 3962 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX); 3963 mutex_unlock(&adev->dm.dc_lock); 3964 3965 if (ret) { 3966 if (aconnector->fake_enable) 3967 aconnector->fake_enable = false; 3968 3969 amdgpu_dm_update_connector_after_detect(aconnector); 3970 3971 drm_modeset_lock_all(dev); 3972 dm_restore_drm_connector_state(dev, connector); 3973 drm_modeset_unlock_all(dev); 3974 3975 drm_kms_helper_connector_hotplug_event(connector); 3976 } 3977 } 3978 } 3979 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { 3980 if (adev->dm.hdcp_workqueue) 3981 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); 3982 } 3983 3984 if (dc_link->type != dc_connection_mst_branch) 3985 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux); 3986 3987 mutex_unlock(&aconnector->hpd_lock); 3988 } 3989 3990 static int register_hpd_handlers(struct amdgpu_device *adev) 3991 { 3992 struct drm_device *dev = adev_to_drm(adev); 3993 struct drm_connector *connector; 3994 struct amdgpu_dm_connector *aconnector; 3995 const struct dc_link *dc_link; 3996 struct dc_interrupt_params int_params = {0}; 3997 3998 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3999 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4000 4001 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 4002 if (!register_dmub_notify_callback(adev, 
DMUB_NOTIFICATION_HPD, 4003 dmub_hpd_callback, true)) { 4004 drm_err(adev_to_drm(adev), "amdgpu: failed to register DMUB HPD callback"); 4005 return -EINVAL; 4006 } 4007 4008 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, 4009 dmub_hpd_callback, true)) { 4010 drm_err(adev_to_drm(adev), "amdgpu: failed to register DMUB HPD IRQ callback"); 4011 return -EINVAL; 4012 } 4013 4014 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY, 4015 dmub_hpd_sense_callback, true)) { 4016 drm_err(adev_to_drm(adev), "amdgpu: failed to register DMUB HPD sense callback"); 4017 return -EINVAL; 4018 } 4019 } 4020 4021 list_for_each_entry(connector, 4022 &dev->mode_config.connector_list, head) { 4023 4024 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 4025 continue; 4026 4027 aconnector = to_amdgpu_dm_connector(connector); 4028 dc_link = aconnector->dc_link; 4029 4030 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { 4031 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 4032 int_params.irq_source = dc_link->irq_source_hpd; 4033 4034 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4035 int_params.irq_source < DC_IRQ_SOURCE_HPD1 || 4036 int_params.irq_source > DC_IRQ_SOURCE_HPD6) { 4037 drm_err(adev_to_drm(adev), "Failed to register hpd irq!\n"); 4038 return -EINVAL; 4039 } 4040 4041 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4042 handle_hpd_irq, (void *) aconnector)) 4043 return -ENOMEM; 4044 } 4045 4046 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { 4047 4048 /* Also register for DP short pulse (hpd_rx). */ 4049 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 4050 int_params.irq_source = dc_link->irq_source_hpd_rx; 4051 4052 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4053 int_params.irq_source < DC_IRQ_SOURCE_HPD1RX || 4054 int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) { 4055 drm_err(adev_to_drm(adev), "Failed to register hpd rx irq!\n"); 4056 return -EINVAL; 4057 } 4058 4059 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4060 handle_hpd_rx_irq, (void *) aconnector)) 4061 return -ENOMEM; 4062 } 4063 } 4064 return 0; 4065 } 4066 4067 #if defined(CONFIG_DRM_AMD_DC_SI) 4068 /* Register IRQ sources and initialize IRQ callbacks */ 4069 static int dce60_register_irq_handlers(struct amdgpu_device *adev) 4070 { 4071 struct dc *dc = adev->dm.dc; 4072 struct common_irq_params *c_irq_params; 4073 struct dc_interrupt_params int_params = {0}; 4074 int r; 4075 int i; 4076 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 4077 4078 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4079 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4080 4081 /* 4082 * Actions of amdgpu_irq_add_id(): 4083 * 1. Register a set() function with base driver. 4084 * Base driver will call set() function to enable/disable an 4085 * interrupt in DC hardware. 4086 * 2. Register amdgpu_dm_irq_handler(). 4087 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 4088 * coming from DC hardware. 4089 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 4090 * for acknowledging and handling.
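* * Every registration block below therefore follows the same pattern: add * the srcid with amdgpu_irq_add_id(), translate it to a DC irq_source via * dc_interrupt_to_irq_source(), range-check the result, and finally attach * the DM handler with amdgpu_dm_irq_register_interrupt().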
4091 */ 4092 4093 /* Use VBLANK interrupt */ 4094 for (i = 0; i < adev->mode_info.num_crtc; i++) { 4095 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq); 4096 if (r) { 4097 drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); 4098 return r; 4099 } 4100 4101 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4102 int_params.irq_source = 4103 dc_interrupt_to_irq_source(dc, i + 1, 0); 4104 4105 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4106 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || 4107 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { 4108 drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); 4109 return -EINVAL; 4110 } 4111 4112 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 4113 4114 c_irq_params->adev = adev; 4115 c_irq_params->irq_src = int_params.irq_source; 4116 4117 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4118 dm_crtc_high_irq, c_irq_params)) 4119 return -ENOMEM; 4120 } 4121 4122 /* Use GRPH_PFLIP interrupt */ 4123 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 4124 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 4125 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 4126 if (r) { 4127 drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); 4128 return r; 4129 } 4130 4131 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4132 int_params.irq_source = 4133 dc_interrupt_to_irq_source(dc, i, 0); 4134 4135 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4136 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || 4137 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { 4138 drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); 4139 return -EINVAL; 4140 } 4141 4142 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 4143 4144 c_irq_params->adev = adev; 4145 c_irq_params->irq_src = int_params.irq_source; 4146 4147 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4148 dm_pflip_high_irq, c_irq_params)) 4149 return -ENOMEM; 4150 } 4151 4152 /* HPD */ 4153 r = amdgpu_irq_add_id(adev, client_id, 4154 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 4155 if (r) { 4156 drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); 4157 return r; 4158 } 4159 4160 r = register_hpd_handlers(adev); 4161 4162 return r; 4163 } 4164 #endif 4165 4166 /* Register IRQ sources and initialize IRQ callbacks */ 4167 static int dce110_register_irq_handlers(struct amdgpu_device *adev) 4168 { 4169 struct dc *dc = adev->dm.dc; 4170 struct common_irq_params *c_irq_params; 4171 struct dc_interrupt_params int_params = {0}; 4172 int r; 4173 int i; 4174 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 4175 4176 if (adev->family >= AMDGPU_FAMILY_AI) 4177 client_id = SOC15_IH_CLIENTID_DCE; 4178 4179 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4180 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4181 4182 /* 4183 * Actions of amdgpu_irq_add_id(): 4184 * 1. Register a set() function with base driver. 4185 * Base driver will call set() function to enable/disable an 4186 * interrupt in DC hardware. 4187 * 2. Register amdgpu_dm_irq_handler(). 4188 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 4189 * coming from DC hardware. 4190 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 4191 * for acknowledging and handling. 
4192 */ 4193 4194 /* Use VBLANK interrupt */ 4195 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { 4196 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); 4197 if (r) { 4198 drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); 4199 return r; 4200 } 4201 4202 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4203 int_params.irq_source = 4204 dc_interrupt_to_irq_source(dc, i, 0); 4205 4206 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4207 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || 4208 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { 4209 drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); 4210 return -EINVAL; 4211 } 4212 4213 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 4214 4215 c_irq_params->adev = adev; 4216 c_irq_params->irq_src = int_params.irq_source; 4217 4218 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4219 dm_crtc_high_irq, c_irq_params)) 4220 return -ENOMEM; 4221 } 4222 4223 /* Use VUPDATE interrupt */ 4224 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { 4225 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); 4226 if (r) { 4227 drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n"); 4228 return r; 4229 } 4230 4231 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4232 int_params.irq_source = 4233 dc_interrupt_to_irq_source(dc, i, 0); 4234 4235 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4236 int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || 4237 int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { 4238 drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n"); 4239 return -EINVAL; 4240 } 4241 4242 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 4243 4244 c_irq_params->adev = adev; 4245 c_irq_params->irq_src = int_params.irq_source; 4246 4247 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4248 dm_vupdate_high_irq, c_irq_params)) 4249 return -ENOMEM; 4250 } 4251 4252 /* Use GRPH_PFLIP interrupt */ 4253 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 4254 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 4255 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 4256 if (r) { 4257 drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); 4258 return r; 4259 } 4260 4261 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4262 int_params.irq_source = 4263 dc_interrupt_to_irq_source(dc, i, 0); 4264 4265 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4266 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || 4267 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { 4268 drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); 4269 return -EINVAL; 4270 } 4271 4272 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 4273 4274 c_irq_params->adev = adev; 4275 c_irq_params->irq_src = int_params.irq_source; 4276 4277 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4278 dm_pflip_high_irq, c_irq_params)) 4279 return -ENOMEM; 4280 } 4281 4282 /* HPD */ 4283 r = amdgpu_irq_add_id(adev, client_id, 4284 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 4285 if (r) { 4286 drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); 4287 return r; 4288 } 4289 4290 r = register_hpd_handlers(adev); 4291 4292 return r; 4293 } 4294 4295 /* Register IRQ sources and initialize IRQ callbacks */ 4296 static int 
dcn10_register_irq_handlers(struct amdgpu_device *adev) 4297 { 4298 struct dc *dc = adev->dm.dc; 4299 struct common_irq_params *c_irq_params; 4300 struct dc_interrupt_params int_params = {0}; 4301 int r; 4302 int i; 4303 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 4304 static const unsigned int vrtl_int_srcid[] = { 4305 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, 4306 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, 4307 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, 4308 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, 4309 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, 4310 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL 4311 }; 4312 #endif 4313 4314 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4315 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4316 4317 /* 4318 * Actions of amdgpu_irq_add_id(): 4319 * 1. Register a set() function with base driver. 4320 * Base driver will call set() function to enable/disable an 4321 * interrupt in DC hardware. 4322 * 2. Register amdgpu_dm_irq_handler(). 4323 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 4324 * coming from DC hardware. 4325 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 4326 * for acknowledging and handling. 4327 */ 4328 4329 /* Use VSTARTUP interrupt */ 4330 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 4331 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 4332 i++) { 4333 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); 4334 4335 if (r) { 4336 drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); 4337 return r; 4338 } 4339 4340 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4341 int_params.irq_source = 4342 dc_interrupt_to_irq_source(dc, i, 0); 4343 4344 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4345 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || 4346 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { 4347 drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); 4348 return -EINVAL; 4349 } 4350 4351 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 4352 4353 c_irq_params->adev = adev; 4354 c_irq_params->irq_src = int_params.irq_source; 4355 4356 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4357 dm_crtc_high_irq, c_irq_params)) 4358 return -ENOMEM; 4359 } 4360 4361 /* Use otg vertical line interrupt */ 4362 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 4363 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { 4364 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, 4365 vrtl_int_srcid[i], &adev->vline0_irq); 4366 4367 if (r) { 4368 drm_err(adev_to_drm(adev), "Failed to add vline0 irq id!\n"); 4369 return r; 4370 } 4371 4372 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4373 int_params.irq_source = 4374 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); 4375 4376 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4377 int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 || 4378 int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) { 4379 drm_err(adev_to_drm(adev), "Failed to register vline0 irq!\n"); 4380 return -EINVAL; 4381 } 4382 4383 c_irq_params = &adev->dm.vline0_params[int_params.irq_source 4384 - DC_IRQ_SOURCE_DC1_VLINE0]; 4385 4386 c_irq_params->adev = adev; 4387 c_irq_params->irq_src = int_params.irq_source; 4388 4389 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4390 dm_dcn_vertical_interrupt0_high_irq, 4391 c_irq_params)) 4392 return -ENOMEM; 4393 } 4394 #endif 4395 4396 /* Use VUPDATE_NO_LOCK interrupt on DCN, 
which seems to correspond to 4397 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx 4398 * to trigger at end of each vblank, regardless of state of the lock, 4399 * matching DCE behaviour. 4400 */ 4401 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; 4402 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; 4403 i++) { 4404 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); 4405 4406 if (r) { 4407 drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n"); 4408 return r; 4409 } 4410 4411 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4412 int_params.irq_source = 4413 dc_interrupt_to_irq_source(dc, i, 0); 4414 4415 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4416 int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || 4417 int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { 4418 drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n"); 4419 return -EINVAL; 4420 } 4421 4422 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 4423 4424 c_irq_params->adev = adev; 4425 c_irq_params->irq_src = int_params.irq_source; 4426 4427 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4428 dm_vupdate_high_irq, c_irq_params)) 4429 return -ENOMEM; 4430 } 4431 4432 /* Use GRPH_PFLIP interrupt */ 4433 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 4434 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; 4435 i++) { 4436 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 4437 if (r) { 4438 drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); 4439 return r; 4440 } 4441 4442 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 4443 int_params.irq_source = 4444 dc_interrupt_to_irq_source(dc, i, 0); 4445 4446 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || 4447 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || 4448 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { 4449 drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); 4450 return -EINVAL; 4451 } 4452 4453 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 4454 4455 c_irq_params->adev = adev; 4456 c_irq_params->irq_src = int_params.irq_source; 4457 4458 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4459 dm_pflip_high_irq, c_irq_params)) 4460 return -ENOMEM; 4461 } 4462 4463 /* HPD */ 4464 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 4465 &adev->hpd_irq); 4466 if (r) { 4467 drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); 4468 return r; 4469 } 4470 4471 r = register_hpd_handlers(adev); 4472 4473 return r; 4474 } 4475 /* Register Outbox IRQ sources and initialize IRQ callbacks */ 4476 static int register_outbox_irq_handlers(struct amdgpu_device *adev) 4477 { 4478 struct dc *dc = adev->dm.dc; 4479 struct common_irq_params *c_irq_params; 4480 struct dc_interrupt_params int_params = {0}; 4481 int r, i; 4482 4483 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 4484 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 4485 4486 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, 4487 &adev->dmub_outbox_irq); 4488 if (r) { 4489 drm_err(adev_to_drm(adev), "Failed to add outbox irq id!\n"); 4490 return r; 4491 } 4492 4493 if (dc->ctx->dmub_srv) { 4494 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT; 4495 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 4496 
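/* * DMUB posts its notifications (HPD, HPD IRQ, HPD sense, AUX replies, etc.) * through outbox1; dm_dmub_outbox1_low_irq() below drains that ring and fans * the notifications out to the callbacks registered in * register_hpd_handlers(). */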
int_params.irq_source = 4497 dc_interrupt_to_irq_source(dc, i, 0); 4498 4499 c_irq_params = &adev->dm.dmub_outbox_params[0]; 4500 4501 c_irq_params->adev = adev; 4502 c_irq_params->irq_src = int_params.irq_source; 4503 4504 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, 4505 dm_dmub_outbox1_low_irq, c_irq_params)) 4506 return -ENOMEM; 4507 } 4508 4509 return 0; 4510 } 4511 4512 /* 4513 * Acquires the lock for the atomic state object and returns 4514 * the new atomic state. 4515 * 4516 * This should only be called during atomic check. 4517 */ 4518 int dm_atomic_get_state(struct drm_atomic_state *state, 4519 struct dm_atomic_state **dm_state) 4520 { 4521 struct drm_device *dev = state->dev; 4522 struct amdgpu_device *adev = drm_to_adev(dev); 4523 struct amdgpu_display_manager *dm = &adev->dm; 4524 struct drm_private_state *priv_state; 4525 4526 if (*dm_state) 4527 return 0; 4528 4529 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); 4530 if (IS_ERR(priv_state)) 4531 return PTR_ERR(priv_state); 4532 4533 *dm_state = to_dm_atomic_state(priv_state); 4534 4535 return 0; 4536 } 4537 4538 static struct dm_atomic_state * 4539 dm_atomic_get_new_state(struct drm_atomic_state *state) 4540 { 4541 struct drm_device *dev = state->dev; 4542 struct amdgpu_device *adev = drm_to_adev(dev); 4543 struct amdgpu_display_manager *dm = &adev->dm; 4544 struct drm_private_obj *obj; 4545 struct drm_private_state *new_obj_state; 4546 int i; 4547 4548 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { 4549 if (obj->funcs == dm->atomic_obj.funcs) 4550 return to_dm_atomic_state(new_obj_state); 4551 } 4552 4553 return NULL; 4554 } 4555 4556 static struct drm_private_state * 4557 dm_atomic_duplicate_state(struct drm_private_obj *obj) 4558 { 4559 struct dm_atomic_state *old_state, *new_state; 4560 4561 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); 4562 if (!new_state) 4563 return NULL; 4564 4565 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); 4566 4567 old_state = to_dm_atomic_state(obj->state); 4568 4569 if (old_state && old_state->context) 4570 new_state->context = dc_state_create_copy(old_state->context); 4571 4572 if (!new_state->context) { 4573 kfree(new_state); 4574 return NULL; 4575 } 4576 4577 return &new_state->base; 4578 } 4579 4580 static void dm_atomic_destroy_state(struct drm_private_obj *obj, 4581 struct drm_private_state *state) 4582 { 4583 struct dm_atomic_state *dm_state = to_dm_atomic_state(state); 4584 4585 if (dm_state && dm_state->context) 4586 dc_state_release(dm_state->context); 4587 4588 kfree(dm_state); 4589 } 4590 4591 static struct drm_private_state_funcs dm_atomic_state_funcs = { 4592 .atomic_duplicate_state = dm_atomic_duplicate_state, 4593 .atomic_destroy_state = dm_atomic_destroy_state, 4594 }; 4595 4596 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) 4597 { 4598 struct dm_atomic_state *state; 4599 int r; 4600 4601 adev->mode_info.mode_config_initialized = true; 4602 4603 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; 4604 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; 4605 4606 adev_to_drm(adev)->mode_config.max_width = 16384; 4607 adev_to_drm(adev)->mode_config.max_height = 16384; 4608 4609 adev_to_drm(adev)->mode_config.preferred_depth = 24; 4610 if (adev->asic_type == CHIP_HAWAII) 4611 /* disable prefer shadow for now due to hibernation issues */ 4612 adev_to_drm(adev)->mode_config.prefer_shadow = 0; 4613 else 4614 
adev_to_drm(adev)->mode_config.prefer_shadow = 1; 4615 /* indicates support for immediate flip */ 4616 adev_to_drm(adev)->mode_config.async_page_flip = true; 4617 4618 state = kzalloc(sizeof(*state), GFP_KERNEL); 4619 if (!state) 4620 return -ENOMEM; 4621 4622 state->context = dc_state_create_current_copy(adev->dm.dc); 4623 if (!state->context) { 4624 kfree(state); 4625 return -ENOMEM; 4626 } 4627 4628 drm_atomic_private_obj_init(adev_to_drm(adev), 4629 &adev->dm.atomic_obj, 4630 &state->base, 4631 &dm_atomic_state_funcs); 4632 4633 r = amdgpu_display_modeset_create_props(adev); 4634 if (r) { 4635 dc_state_release(state->context); 4636 kfree(state); 4637 return r; 4638 } 4639 4640 #ifdef AMD_PRIVATE_COLOR 4641 if (amdgpu_dm_create_color_properties(adev)) { 4642 dc_state_release(state->context); 4643 kfree(state); 4644 return -ENOMEM; 4645 } 4646 #endif 4647 4648 r = amdgpu_dm_audio_init(adev); 4649 if (r) { 4650 dc_state_release(state->context); 4651 kfree(state); 4652 return r; 4653 } 4654 4655 return 0; 4656 } 4657 4658 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 4659 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 4660 #define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2) 4661 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 4662 4663 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, 4664 int bl_idx) 4665 { 4666 struct amdgpu_dm_backlight_caps *caps = &dm->backlight_caps[bl_idx]; 4667 4668 if (caps->caps_valid) 4669 return; 4670 4671 #if defined(CONFIG_ACPI) 4672 amdgpu_acpi_get_backlight_caps(caps); 4673 4674 /* validate the firmware value is sane */ 4675 if (caps->caps_valid) { 4676 int spread = caps->max_input_signal - caps->min_input_signal; 4677 4678 if (caps->max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || 4679 caps->min_input_signal < 0 || 4680 spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || 4681 spread < AMDGPU_DM_MIN_SPREAD) { 4682 DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n", 4683 caps->min_input_signal, caps->max_input_signal); 4684 caps->caps_valid = false; 4685 } 4686 } 4687 4688 if (!caps->caps_valid) { 4689 caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4690 caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4691 caps->caps_valid = true; 4692 } 4693 #else 4694 if (caps->aux_support) 4695 return; 4696 4697 caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4698 caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4699 caps->caps_valid = true; 4700 #endif 4701 } 4702 4703 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, 4704 unsigned int *min, unsigned int *max) 4705 { 4706 if (!caps) 4707 return 0; 4708 4709 if (caps->aux_support) { 4710 // Firmware limits are in nits, DC API wants millinits. 4711 *max = 1000 * caps->aux_max_input_signal; 4712 *min = 1000 * caps->aux_min_input_signal; 4713 } else { 4714 // Firmware limits are 8-bit, PWM control is 16-bit. 
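// Scaling by 0x101 (257) expands 8 bits to 16 exactly: 255 * 0x101 == 0xffff.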
4715 *max = 0x101 * caps->max_input_signal; 4716 *min = 0x101 * caps->min_input_signal; 4717 } 4718 return 1; 4719 } 4720 4721 static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps, 4722 uint32_t *brightness) 4723 { 4724 u8 prev_signal = 0, prev_lum = 0; 4725 int i = 0; 4726 4727 if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE) 4728 return; 4729 4730 if (!caps->data_points) 4731 return; 4732 4733 /* choose start to run less interpolation steps */ 4734 if (caps->luminance_data[caps->data_points/2].input_signal > *brightness) 4735 i = caps->data_points/2; 4736 do { 4737 u8 signal = caps->luminance_data[i].input_signal; 4738 u8 lum = caps->luminance_data[i].luminance; 4739 4740 /* 4741 * brightness == signal: luminance is percent numerator 4742 * brightness < signal: interpolate between previous and current luminance numerator 4743 * brightness > signal: find next data point 4744 */ 4745 if (*brightness > signal) { 4746 prev_signal = signal; 4747 prev_lum = lum; 4748 i++; 4749 continue; 4750 } 4751 if (*brightness < signal) 4752 lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) * 4753 (*brightness - prev_signal), 4754 signal - prev_signal); 4755 *brightness = DIV_ROUND_CLOSEST(lum * *brightness, 101); 4756 return; 4757 } while (i < caps->data_points); 4758 } 4759 4760 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, 4761 uint32_t brightness) 4762 { 4763 unsigned int min, max; 4764 4765 if (!get_brightness_range(caps, &min, &max)) 4766 return brightness; 4767 4768 convert_custom_brightness(caps, &brightness); 4769 4770 // Rescale 0..255 to min..max 4771 return min + DIV_ROUND_CLOSEST((max - min) * brightness, 4772 AMDGPU_MAX_BL_LEVEL); 4773 } 4774 4775 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, 4776 uint32_t brightness) 4777 { 4778 unsigned int min, max; 4779 4780 if (!get_brightness_range(caps, &min, &max)) 4781 return brightness; 4782 4783 if (brightness < min) 4784 return 0; 4785 // Rescale min..max to 0..255 4786 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), 4787 max - min); 4788 } 4789 4790 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, 4791 int bl_idx, 4792 u32 user_brightness) 4793 { 4794 struct amdgpu_dm_backlight_caps *caps; 4795 struct dc_link *link; 4796 u32 brightness; 4797 bool rc, reallow_idle = false; 4798 4799 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4800 caps = &dm->backlight_caps[bl_idx]; 4801 4802 dm->brightness[bl_idx] = user_brightness; 4803 /* update scratch register */ 4804 if (bl_idx == 0) 4805 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); 4806 brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]); 4807 link = (struct dc_link *)dm->backlight_link[bl_idx]; 4808 4809 /* Change brightness based on AUX property */ 4810 mutex_lock(&dm->dc_lock); 4811 if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) { 4812 dc_allow_idle_optimizations(dm->dc, false); 4813 reallow_idle = true; 4814 } 4815 4816 if (caps->aux_support) { 4817 rc = dc_link_set_backlight_level_nits(link, true, brightness, 4818 AUX_BL_DEFAULT_TRANSITION_TIME_MS); 4819 if (!rc) 4820 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); 4821 } else { 4822 struct set_backlight_level_params backlight_level_params = { 0 }; 4823 4824 backlight_level_params.backlight_pwm_u16_16 = brightness; 4825 backlight_level_params.transition_time_in_ms = 0; 4826 4827 rc 
= dc_link_set_backlight_level(link, &backlight_level_params); 4828 if (!rc) 4829 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); 4830 } 4831 4832 if (dm->dc->caps.ips_support && reallow_idle) 4833 dc_allow_idle_optimizations(dm->dc, true); 4834 4835 mutex_unlock(&dm->dc_lock); 4836 4837 if (rc) 4838 dm->actual_brightness[bl_idx] = user_brightness; 4839 } 4840 4841 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) 4842 { 4843 struct amdgpu_display_manager *dm = bl_get_data(bd); 4844 int i; 4845 4846 for (i = 0; i < dm->num_of_edps; i++) { 4847 if (bd == dm->backlight_dev[i]) 4848 break; 4849 } 4850 if (i >= AMDGPU_DM_MAX_NUM_EDP) 4851 i = 0; 4852 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness); 4853 4854 return 0; 4855 } 4856 4857 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, 4858 int bl_idx) 4859 { 4860 int ret; 4861 struct amdgpu_dm_backlight_caps caps; 4862 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; 4863 4864 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4865 caps = dm->backlight_caps[bl_idx]; 4866 4867 if (caps.aux_support) { 4868 u32 avg, peak; 4869 bool rc; 4870 4871 rc = dc_link_get_backlight_level_nits(link, &avg, &peak); 4872 if (!rc) 4873 return dm->brightness[bl_idx]; 4874 return convert_brightness_to_user(&caps, avg); 4875 } 4876 4877 ret = dc_link_get_backlight_level(link); 4878 4879 if (ret == DC_ERROR_UNEXPECTED) 4880 return dm->brightness[bl_idx]; 4881 4882 return convert_brightness_to_user(&caps, ret); 4883 } 4884 4885 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) 4886 { 4887 struct amdgpu_display_manager *dm = bl_get_data(bd); 4888 int i; 4889 4890 for (i = 0; i < dm->num_of_edps; i++) { 4891 if (bd == dm->backlight_dev[i]) 4892 break; 4893 } 4894 if (i >= AMDGPU_DM_MAX_NUM_EDP) 4895 i = 0; 4896 return amdgpu_dm_backlight_get_level(dm, i); 4897 } 4898 4899 static const struct backlight_ops amdgpu_dm_backlight_ops = { 4900 .options = BL_CORE_SUSPENDRESUME, 4901 .get_brightness = amdgpu_dm_backlight_get_brightness, 4902 .update_status = amdgpu_dm_backlight_update_status, 4903 }; 4904 4905 static void 4906 amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) 4907 { 4908 struct drm_device *drm = aconnector->base.dev; 4909 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; 4910 struct backlight_properties props = { 0 }; 4911 struct amdgpu_dm_backlight_caps caps = { 0 }; 4912 char bl_name[16]; 4913 int min, max; 4914 4915 if (aconnector->bl_idx == -1) 4916 return; 4917 4918 if (!acpi_video_backlight_use_native()) { 4919 drm_info(drm, "Skipping amdgpu DM backlight registration\n"); 4920 /* Try registering an ACPI video backlight device instead. 
*/ 4921 acpi_video_register_backlight(); 4922 return; 4923 } 4924 4925 amdgpu_acpi_get_backlight_caps(&caps); 4926 if (caps.caps_valid && get_brightness_range(&caps, &min, &max)) { 4927 if (power_supply_is_system_supplied() > 0) 4928 props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps.ac_level, 100); 4929 else 4930 props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps.dc_level, 100); 4931 /* min is zero, so max needs to be adjusted */ 4932 props.max_brightness = max - min; 4933 drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max, 4934 caps.ac_level, caps.dc_level); 4935 } else 4936 props.brightness = AMDGPU_MAX_BL_LEVEL; 4937 4938 if (caps.data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) 4939 drm_info(drm, "Using custom brightness curve\n"); 4940 props.max_brightness = AMDGPU_MAX_BL_LEVEL; 4941 props.type = BACKLIGHT_RAW; 4942 4943 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", 4944 drm->primary->index + aconnector->bl_idx); 4945 4946 dm->backlight_dev[aconnector->bl_idx] = 4947 backlight_device_register(bl_name, aconnector->base.kdev, dm, 4948 &amdgpu_dm_backlight_ops, &props); 4949 dm->brightness[aconnector->bl_idx] = props.brightness; 4950 4951 if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { 4952 drm_err(drm, "DM: Backlight registration failed!\n"); 4953 dm->backlight_dev[aconnector->bl_idx] = NULL; 4954 } else 4955 drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name); 4956 } 4957 4958 static int initialize_plane(struct amdgpu_display_manager *dm, 4959 struct amdgpu_mode_info *mode_info, int plane_id, 4960 enum drm_plane_type plane_type, 4961 const struct dc_plane_cap *plane_cap) 4962 { 4963 struct drm_plane *plane; 4964 unsigned long possible_crtcs; 4965 int ret = 0; 4966 4967 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); 4968 if (!plane) { 4969 drm_err(adev_to_drm(dm->adev), "KMS: Failed to allocate plane\n"); 4970 return -ENOMEM; 4971 } 4972 plane->type = plane_type; 4973 4974 /* 4975 * HACK: IGT tests expect that the primary plane for a CRTC 4976 * can only have one possible CRTC. Only expose support for 4977 * any CRTC if they're not going to be used as a primary plane 4978 * for a CRTC - like overlay or underlay planes. 
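* With this scheme, plane_id 0 gets possible_crtcs = 0x1 (CRTC 0 only), * while a plane_id at or beyond dc->caps.max_streams gets 0xff (any CRTC), * as set up immediately below.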
4979 */ 4980 possible_crtcs = 1 << plane_id; 4981 if (plane_id >= dm->dc->caps.max_streams) 4982 possible_crtcs = 0xff; 4983 4984 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap); 4985 4986 if (ret) { 4987 drm_err(adev_to_drm(dm->adev), "KMS: Failed to initialize plane\n"); 4988 kfree(plane); 4989 return ret; 4990 } 4991 4992 if (mode_info) 4993 mode_info->planes[plane_id] = plane; 4994 4995 return ret; 4996 } 4997 4998 4999 static void setup_backlight_device(struct amdgpu_display_manager *dm, 5000 struct amdgpu_dm_connector *aconnector) 5001 { 5002 struct dc_link *link = aconnector->dc_link; 5003 int bl_idx = dm->num_of_edps; 5004 5005 if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) || 5006 link->type == dc_connection_none) 5007 return; 5008 5009 if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) { 5010 drm_warn(adev_to_drm(dm->adev), "Too much eDP connections, skipping backlight setup for additional eDPs\n"); 5011 return; 5012 } 5013 5014 aconnector->bl_idx = bl_idx; 5015 5016 amdgpu_dm_update_backlight_caps(dm, bl_idx); 5017 dm->backlight_link[bl_idx] = link; 5018 dm->num_of_edps++; 5019 5020 update_connector_ext_caps(aconnector); 5021 } 5022 5023 static void amdgpu_set_panel_orientation(struct drm_connector *connector); 5024 5025 /* 5026 * In this architecture, the association 5027 * connector -> encoder -> crtc 5028 * id not really requried. The crtc and connector will hold the 5029 * display_index as an abstraction to use with DAL component 5030 * 5031 * Returns 0 on success 5032 */ 5033 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) 5034 { 5035 struct amdgpu_display_manager *dm = &adev->dm; 5036 s32 i; 5037 struct amdgpu_dm_connector *aconnector = NULL; 5038 struct amdgpu_encoder *aencoder = NULL; 5039 struct amdgpu_mode_info *mode_info = &adev->mode_info; 5040 u32 link_cnt; 5041 s32 primary_planes; 5042 enum dc_connection_type new_connection_type = dc_connection_none; 5043 const struct dc_plane_cap *plane; 5044 bool psr_feature_enabled = false; 5045 bool replay_feature_enabled = false; 5046 int max_overlay = dm->dc->caps.max_slave_planes; 5047 5048 dm->display_indexes_num = dm->dc->caps.max_streams; 5049 /* Update the actual used number of crtc */ 5050 adev->mode_info.num_crtc = adev->dm.display_indexes_num; 5051 5052 amdgpu_dm_set_irq_funcs(adev); 5053 5054 link_cnt = dm->dc->caps.max_links; 5055 if (amdgpu_dm_mode_config_init(dm->adev)) { 5056 drm_err(adev_to_drm(adev), "DM: Failed to initialize mode config\n"); 5057 return -EINVAL; 5058 } 5059 5060 /* There is one primary plane per CRTC */ 5061 primary_planes = dm->dc->caps.max_streams; 5062 if (primary_planes > AMDGPU_MAX_PLANES) { 5063 drm_err(adev_to_drm(adev), "DM: Plane nums out of 6 planes\n"); 5064 return -EINVAL; 5065 } 5066 5067 /* 5068 * Initialize primary planes, implicit planes for legacy IOCTLS. 5069 * Order is reversed to match iteration order in atomic check. 5070 */ 5071 for (i = (primary_planes - 1); i >= 0; i--) { 5072 plane = &dm->dc->caps.planes[i]; 5073 5074 if (initialize_plane(dm, mode_info, i, 5075 DRM_PLANE_TYPE_PRIMARY, plane)) { 5076 drm_err(adev_to_drm(adev), "KMS: Failed to initialize primary plane\n"); 5077 goto fail; 5078 } 5079 } 5080 5081 /* 5082 * Initialize overlay planes, index starting after primary planes. 5083 * These planes have a higher DRM index than the primary planes since 5084 * they should be considered as having a higher z-order. 5085 * Order is reversed to match iteration order in atomic check. 
5086 * 5087 * Only support DCN for now, and only expose one so we don't encourage 5088 * userspace to use up all the pipes. 5089 */ 5090 for (i = 0; i < dm->dc->caps.max_planes; ++i) { 5091 struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; 5092 5093 /* Do not create overlay if MPO disabled */ 5094 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO) 5095 break; 5096 5097 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) 5098 continue; 5099 5100 if (!plane->pixel_format_support.argb8888) 5101 continue; 5102 5103 if (max_overlay-- == 0) 5104 break; 5105 5106 if (initialize_plane(dm, NULL, primary_planes + i, 5107 DRM_PLANE_TYPE_OVERLAY, plane)) { 5108 drm_err(adev_to_drm(adev), "KMS: Failed to initialize overlay plane\n"); 5109 goto fail; 5110 } 5111 } 5112 5113 for (i = 0; i < dm->dc->caps.max_streams; i++) 5114 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { 5115 drm_err(adev_to_drm(adev), "KMS: Failed to initialize crtc\n"); 5116 goto fail; 5117 } 5118 5119 /* Use Outbox interrupt */ 5120 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5121 case IP_VERSION(3, 0, 0): 5122 case IP_VERSION(3, 1, 2): 5123 case IP_VERSION(3, 1, 3): 5124 case IP_VERSION(3, 1, 4): 5125 case IP_VERSION(3, 1, 5): 5126 case IP_VERSION(3, 1, 6): 5127 case IP_VERSION(3, 2, 0): 5128 case IP_VERSION(3, 2, 1): 5129 case IP_VERSION(2, 1, 0): 5130 case IP_VERSION(3, 5, 0): 5131 case IP_VERSION(3, 5, 1): 5132 case IP_VERSION(3, 6, 0): 5133 case IP_VERSION(4, 0, 1): 5134 if (register_outbox_irq_handlers(dm->adev)) { 5135 drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); 5136 goto fail; 5137 } 5138 break; 5139 default: 5140 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", 5141 amdgpu_ip_version(adev, DCE_HWIP, 0)); 5142 } 5143 5144 /* Determine whether to enable PSR support by default. */ 5145 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { 5146 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5147 case IP_VERSION(3, 1, 2): 5148 case IP_VERSION(3, 1, 3): 5149 case IP_VERSION(3, 1, 4): 5150 case IP_VERSION(3, 1, 5): 5151 case IP_VERSION(3, 1, 6): 5152 case IP_VERSION(3, 2, 0): 5153 case IP_VERSION(3, 2, 1): 5154 case IP_VERSION(3, 5, 0): 5155 case IP_VERSION(3, 5, 1): 5156 case IP_VERSION(3, 6, 0): 5157 case IP_VERSION(4, 0, 1): 5158 psr_feature_enabled = true; 5159 break; 5160 default: 5161 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK; 5162 break; 5163 } 5164 } 5165 5166 /* Determine whether to enable Replay support by default. 
*/ 5167 if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { 5168 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5169 case IP_VERSION(3, 1, 4): 5170 case IP_VERSION(3, 2, 0): 5171 case IP_VERSION(3, 2, 1): 5172 case IP_VERSION(3, 5, 0): 5173 case IP_VERSION(3, 5, 1): 5174 case IP_VERSION(3, 6, 0): 5175 replay_feature_enabled = true; 5176 break; 5177 5178 default: 5179 replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK; 5180 break; 5181 } 5182 } 5183 5184 if (link_cnt > MAX_LINKS) { 5185 drm_err(adev_to_drm(adev), 5186 "KMS: Cannot support more than %d display indexes\n", 5187 MAX_LINKS); 5188 goto fail; 5189 } 5190 5191 /* loops over all connectors on the board */ 5192 for (i = 0; i < link_cnt; i++) { 5193 struct dc_link *link = NULL; 5194 5195 link = dc_get_link_at_index(dm->dc, i); 5196 5197 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) { 5198 struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL); 5199 5200 if (!wbcon) { 5201 drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n"); 5202 continue; 5203 } 5204 5205 if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) { 5206 drm_err(adev_to_drm(adev), "KMS: Failed to initialize writeback connector\n"); 5207 kfree(wbcon); 5208 continue; 5209 } 5210 5211 link->psr_settings.psr_feature_enabled = false; 5212 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; 5213 5214 continue; 5215 } 5216 5217 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 5218 if (!aconnector) 5219 goto fail; 5220 5221 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); 5222 if (!aencoder) 5223 goto fail; 5224 5225 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { 5226 drm_err(adev_to_drm(adev), "KMS: Failed to initialize encoder\n"); 5227 goto fail; 5228 } 5229 5230 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { 5231 drm_err(adev_to_drm(adev), "KMS: Failed to initialize connector\n"); 5232 goto fail; 5233 } 5234 5235 if (dm->hpd_rx_offload_wq) 5236 dm->hpd_rx_offload_wq[aconnector->base.index].aconnector = 5237 aconnector; 5238 5239 if (!dc_link_detect_connection_type(link, &new_connection_type)) 5240 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); 5241 5242 if (aconnector->base.force && new_connection_type == dc_connection_none) { 5243 emulated_link_detect(link); 5244 amdgpu_dm_update_connector_after_detect(aconnector); 5245 } else { 5246 bool ret = false; 5247 5248 mutex_lock(&dm->dc_lock); 5249 dc_exit_ips_for_hw_access(dm->dc); 5250 ret = dc_link_detect(link, DETECT_REASON_BOOT); 5251 mutex_unlock(&dm->dc_lock); 5252 5253 if (ret) { 5254 amdgpu_dm_update_connector_after_detect(aconnector); 5255 setup_backlight_device(dm, aconnector); 5256 5257 /* Disable PSR if Replay can be enabled */ 5258 if (replay_feature_enabled) 5259 if (amdgpu_dm_set_replay_caps(link, aconnector)) 5260 psr_feature_enabled = false; 5261 5262 if (psr_feature_enabled) { 5263 amdgpu_dm_set_psr_caps(link); 5264 drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n", 5265 link->psr_settings.psr_feature_enabled, 5266 link->psr_settings.psr_version, 5267 link->dpcd_caps.psr_info.psr_version, 5268 link->dpcd_caps.psr_info.psr_dpcd_caps.raw, 5269 link->dpcd_caps.psr_info.psr2_su_y_granularity_cap); 5270 } 5271 } 5272 } 5273 amdgpu_set_panel_orientation(&aconnector->base); 5274 } 5275 5276 /* Software is initialized. Now we can register interrupt handlers. 
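Pre-DCN ASICs are matched by asic_type in the switch below, while DCN parts fall through to the default case and are matched by their DCE_HWIP IP version instead.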
*/ 5277 switch (adev->asic_type) { 5278 #if defined(CONFIG_DRM_AMD_DC_SI) 5279 case CHIP_TAHITI: 5280 case CHIP_PITCAIRN: 5281 case CHIP_VERDE: 5282 case CHIP_OLAND: 5283 if (dce60_register_irq_handlers(dm->adev)) { 5284 drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); 5285 goto fail; 5286 } 5287 break; 5288 #endif 5289 case CHIP_BONAIRE: 5290 case CHIP_HAWAII: 5291 case CHIP_KAVERI: 5292 case CHIP_KABINI: 5293 case CHIP_MULLINS: 5294 case CHIP_TONGA: 5295 case CHIP_FIJI: 5296 case CHIP_CARRIZO: 5297 case CHIP_STONEY: 5298 case CHIP_POLARIS11: 5299 case CHIP_POLARIS10: 5300 case CHIP_POLARIS12: 5301 case CHIP_VEGAM: 5302 case CHIP_VEGA10: 5303 case CHIP_VEGA12: 5304 case CHIP_VEGA20: 5305 if (dce110_register_irq_handlers(dm->adev)) { 5306 drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); 5307 goto fail; 5308 } 5309 break; 5310 default: 5311 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5312 case IP_VERSION(1, 0, 0): 5313 case IP_VERSION(1, 0, 1): 5314 case IP_VERSION(2, 0, 2): 5315 case IP_VERSION(2, 0, 3): 5316 case IP_VERSION(2, 0, 0): 5317 case IP_VERSION(2, 1, 0): 5318 case IP_VERSION(3, 0, 0): 5319 case IP_VERSION(3, 0, 2): 5320 case IP_VERSION(3, 0, 3): 5321 case IP_VERSION(3, 0, 1): 5322 case IP_VERSION(3, 1, 2): 5323 case IP_VERSION(3, 1, 3): 5324 case IP_VERSION(3, 1, 4): 5325 case IP_VERSION(3, 1, 5): 5326 case IP_VERSION(3, 1, 6): 5327 case IP_VERSION(3, 2, 0): 5328 case IP_VERSION(3, 2, 1): 5329 case IP_VERSION(3, 5, 0): 5330 case IP_VERSION(3, 5, 1): 5331 case IP_VERSION(3, 6, 0): 5332 case IP_VERSION(4, 0, 1): 5333 if (dcn10_register_irq_handlers(dm->adev)) { 5334 drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); 5335 goto fail; 5336 } 5337 break; 5338 default: 5339 drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%X\n", 5340 amdgpu_ip_version(adev, DCE_HWIP, 0)); 5341 goto fail; 5342 } 5343 break; 5344 } 5345 5346 return 0; 5347 fail: 5348 kfree(aencoder); 5349 kfree(aconnector); 5350 5351 return -EINVAL; 5352 } 5353 5354 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) 5355 { 5356 drm_atomic_private_obj_fini(&dm->atomic_obj); 5357 } 5358 5359 /****************************************************************************** 5360 * amdgpu_display_funcs functions 5361 *****************************************************************************/ 5362 5363 /* 5364 * dm_bandwidth_update - program display watermarks 5365 * 5366 * @adev: amdgpu_device pointer 5367 * 5368 * Calculate and program the display watermarks and line buffer allocation. 5369 */ 5370 static void dm_bandwidth_update(struct amdgpu_device *adev) 5371 { 5372 /* TODO: implement later */ 5373 } 5374 5375 static const struct amdgpu_display_funcs dm_display_funcs = { 5376 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ 5377 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ 5378 .backlight_set_level = NULL, /* never called for DC */ 5379 .backlight_get_level = NULL, /* never called for DC */ 5380 .hpd_sense = NULL,/* called unconditionally */ 5381 .hpd_set_polarity = NULL, /* called unconditionally */ 5382 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ 5383 .page_flip_get_scanoutpos = 5384 dm_crtc_get_scanoutpos,/* called unconditionally */ 5385 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ 5386 .add_connector = NULL, /* VBIOS parsing. DAL does it. 
*/ 5387 }; 5388 5389 #if defined(CONFIG_DEBUG_KERNEL_DC) 5390 5391 static ssize_t s3_debug_store(struct device *device, 5392 struct device_attribute *attr, 5393 const char *buf, 5394 size_t count) 5395 { 5396 int ret; 5397 int s3_state; 5398 struct drm_device *drm_dev = dev_get_drvdata(device); 5399 struct amdgpu_device *adev = drm_to_adev(drm_dev); 5400 struct amdgpu_ip_block *ip_block; 5401 5402 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE); 5403 if (!ip_block) 5404 return -EINVAL; 5405 5406 ret = kstrtoint(buf, 0, &s3_state); 5407 5408 if (ret == 0) { 5409 if (s3_state) { 5410 dm_resume(ip_block); 5411 drm_kms_helper_hotplug_event(adev_to_drm(adev)); 5412 } else 5413 dm_suspend(ip_block); 5414 } 5415 5416 return ret == 0 ? count : 0; 5417 } 5418 5419 DEVICE_ATTR_WO(s3_debug); 5420 5421 #endif 5422 5423 static int dm_init_microcode(struct amdgpu_device *adev) 5424 { 5425 char *fw_name_dmub; 5426 int r; 5427 5428 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5429 case IP_VERSION(2, 1, 0): 5430 fw_name_dmub = FIRMWARE_RENOIR_DMUB; 5431 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) 5432 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; 5433 break; 5434 case IP_VERSION(3, 0, 0): 5435 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0)) 5436 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; 5437 else 5438 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; 5439 break; 5440 case IP_VERSION(3, 0, 1): 5441 fw_name_dmub = FIRMWARE_VANGOGH_DMUB; 5442 break; 5443 case IP_VERSION(3, 0, 2): 5444 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; 5445 break; 5446 case IP_VERSION(3, 0, 3): 5447 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; 5448 break; 5449 case IP_VERSION(3, 1, 2): 5450 case IP_VERSION(3, 1, 3): 5451 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; 5452 break; 5453 case IP_VERSION(3, 1, 4): 5454 fw_name_dmub = FIRMWARE_DCN_314_DMUB; 5455 break; 5456 case IP_VERSION(3, 1, 5): 5457 fw_name_dmub = FIRMWARE_DCN_315_DMUB; 5458 break; 5459 case IP_VERSION(3, 1, 6): 5460 fw_name_dmub = FIRMWARE_DCN316_DMUB; 5461 break; 5462 case IP_VERSION(3, 2, 0): 5463 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; 5464 break; 5465 case IP_VERSION(3, 2, 1): 5466 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; 5467 break; 5468 case IP_VERSION(3, 5, 0): 5469 fw_name_dmub = FIRMWARE_DCN_35_DMUB; 5470 break; 5471 case IP_VERSION(3, 5, 1): 5472 fw_name_dmub = FIRMWARE_DCN_351_DMUB; 5473 break; 5474 case IP_VERSION(3, 6, 0): 5475 fw_name_dmub = FIRMWARE_DCN_36_DMUB; 5476 break; 5477 case IP_VERSION(4, 0, 1): 5478 fw_name_dmub = FIRMWARE_DCN_401_DMUB; 5479 break; 5480 default: 5481 /* ASIC doesn't support DMUB. 
*/ 5482 return 0; 5483 } 5484 r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, AMDGPU_UCODE_REQUIRED, 5485 "%s", fw_name_dmub); 5486 return r; 5487 } 5488 5489 static int dm_early_init(struct amdgpu_ip_block *ip_block) 5490 { 5491 struct amdgpu_device *adev = ip_block->adev; 5492 struct amdgpu_mode_info *mode_info = &adev->mode_info; 5493 struct atom_context *ctx = mode_info->atom_context; 5494 int index = GetIndexIntoMasterTable(DATA, Object_Header); 5495 u16 data_offset; 5496 5497 /* if there is no object header, skip DM */ 5498 if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { 5499 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 5500 drm_info(adev_to_drm(adev), "No object header, skipping DM\n"); 5501 return -ENOENT; 5502 } 5503 5504 switch (adev->asic_type) { 5505 #if defined(CONFIG_DRM_AMD_DC_SI) 5506 case CHIP_TAHITI: 5507 case CHIP_PITCAIRN: 5508 case CHIP_VERDE: 5509 adev->mode_info.num_crtc = 6; 5510 adev->mode_info.num_hpd = 6; 5511 adev->mode_info.num_dig = 6; 5512 break; 5513 case CHIP_OLAND: 5514 adev->mode_info.num_crtc = 2; 5515 adev->mode_info.num_hpd = 2; 5516 adev->mode_info.num_dig = 2; 5517 break; 5518 #endif 5519 case CHIP_BONAIRE: 5520 case CHIP_HAWAII: 5521 adev->mode_info.num_crtc = 6; 5522 adev->mode_info.num_hpd = 6; 5523 adev->mode_info.num_dig = 6; 5524 break; 5525 case CHIP_KAVERI: 5526 adev->mode_info.num_crtc = 4; 5527 adev->mode_info.num_hpd = 6; 5528 adev->mode_info.num_dig = 7; 5529 break; 5530 case CHIP_KABINI: 5531 case CHIP_MULLINS: 5532 adev->mode_info.num_crtc = 2; 5533 adev->mode_info.num_hpd = 6; 5534 adev->mode_info.num_dig = 6; 5535 break; 5536 case CHIP_FIJI: 5537 case CHIP_TONGA: 5538 adev->mode_info.num_crtc = 6; 5539 adev->mode_info.num_hpd = 6; 5540 adev->mode_info.num_dig = 7; 5541 break; 5542 case CHIP_CARRIZO: 5543 adev->mode_info.num_crtc = 3; 5544 adev->mode_info.num_hpd = 6; 5545 adev->mode_info.num_dig = 9; 5546 break; 5547 case CHIP_STONEY: 5548 adev->mode_info.num_crtc = 2; 5549 adev->mode_info.num_hpd = 6; 5550 adev->mode_info.num_dig = 9; 5551 break; 5552 case CHIP_POLARIS11: 5553 case CHIP_POLARIS12: 5554 adev->mode_info.num_crtc = 5; 5555 adev->mode_info.num_hpd = 5; 5556 adev->mode_info.num_dig = 5; 5557 break; 5558 case CHIP_POLARIS10: 5559 case CHIP_VEGAM: 5560 adev->mode_info.num_crtc = 6; 5561 adev->mode_info.num_hpd = 6; 5562 adev->mode_info.num_dig = 6; 5563 break; 5564 case CHIP_VEGA10: 5565 case CHIP_VEGA12: 5566 case CHIP_VEGA20: 5567 adev->mode_info.num_crtc = 6; 5568 adev->mode_info.num_hpd = 6; 5569 adev->mode_info.num_dig = 6; 5570 break; 5571 default: 5572 5573 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 5574 case IP_VERSION(2, 0, 2): 5575 case IP_VERSION(3, 0, 0): 5576 adev->mode_info.num_crtc = 6; 5577 adev->mode_info.num_hpd = 6; 5578 adev->mode_info.num_dig = 6; 5579 break; 5580 case IP_VERSION(2, 0, 0): 5581 case IP_VERSION(3, 0, 2): 5582 adev->mode_info.num_crtc = 5; 5583 adev->mode_info.num_hpd = 5; 5584 adev->mode_info.num_dig = 5; 5585 break; 5586 case IP_VERSION(2, 0, 3): 5587 case IP_VERSION(3, 0, 3): 5588 adev->mode_info.num_crtc = 2; 5589 adev->mode_info.num_hpd = 2; 5590 adev->mode_info.num_dig = 2; 5591 break; 5592 case IP_VERSION(1, 0, 0): 5593 case IP_VERSION(1, 0, 1): 5594 case IP_VERSION(3, 0, 1): 5595 case IP_VERSION(2, 1, 0): 5596 case IP_VERSION(3, 1, 2): 5597 case IP_VERSION(3, 1, 3): 5598 case IP_VERSION(3, 1, 4): 5599 case IP_VERSION(3, 1, 5): 5600 case IP_VERSION(3, 1, 6): 5601 case IP_VERSION(3, 2, 0): 5602 case IP_VERSION(3, 2, 1): 5603 case 
IP_VERSION(3, 5, 0): 5604 case IP_VERSION(3, 5, 1): 5605 case IP_VERSION(3, 6, 0): 5606 case IP_VERSION(4, 0, 1): 5607 adev->mode_info.num_crtc = 4; 5608 adev->mode_info.num_hpd = 4; 5609 adev->mode_info.num_dig = 4; 5610 break; 5611 default: 5612 drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%x\n", 5613 amdgpu_ip_version(adev, DCE_HWIP, 0)); 5614 return -EINVAL; 5615 } 5616 break; 5617 } 5618 5619 if (adev->mode_info.funcs == NULL) 5620 adev->mode_info.funcs = &dm_display_funcs; 5621 5622 /* 5623 * Note: Do NOT change adev->audio_endpt_rreg and 5624 * adev->audio_endpt_wreg because they are initialised in 5625 * amdgpu_device_init() 5626 */ 5627 #if defined(CONFIG_DEBUG_KERNEL_DC) 5628 device_create_file( 5629 adev_to_drm(adev)->dev, 5630 &dev_attr_s3_debug); 5631 #endif 5632 adev->dc_enabled = true; 5633 5634 return dm_init_microcode(adev); 5635 } 5636 5637 static bool modereset_required(struct drm_crtc_state *crtc_state) 5638 { 5639 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 5640 } 5641 5642 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 5643 { 5644 drm_encoder_cleanup(encoder); 5645 kfree(encoder); 5646 } 5647 5648 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 5649 .destroy = amdgpu_dm_encoder_destroy, 5650 }; 5651 5652 static int 5653 fill_plane_color_attributes(const struct drm_plane_state *plane_state, 5654 const enum surface_pixel_format format, 5655 enum dc_color_space *color_space) 5656 { 5657 bool full_range; 5658 5659 *color_space = COLOR_SPACE_SRGB; 5660 5661 /* DRM color properties only affect non-RGB formats. */ 5662 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 5663 return 0; 5664 5665 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); 5666 5667 switch (plane_state->color_encoding) { 5668 case DRM_COLOR_YCBCR_BT601: 5669 if (full_range) 5670 *color_space = COLOR_SPACE_YCBCR601; 5671 else 5672 *color_space = COLOR_SPACE_YCBCR601_LIMITED; 5673 break; 5674 5675 case DRM_COLOR_YCBCR_BT709: 5676 if (full_range) 5677 *color_space = COLOR_SPACE_YCBCR709; 5678 else 5679 *color_space = COLOR_SPACE_YCBCR709_LIMITED; 5680 break; 5681 5682 case DRM_COLOR_YCBCR_BT2020: 5683 if (full_range) 5684 *color_space = COLOR_SPACE_2020_YCBCR_FULL; 5685 else 5686 *color_space = COLOR_SPACE_2020_YCBCR_LIMITED; 5687 break; 5688 5689 default: 5690 return -EINVAL; 5691 } 5692 5693 return 0; 5694 } 5695 5696 static int 5697 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 5698 const struct drm_plane_state *plane_state, 5699 const u64 tiling_flags, 5700 struct dc_plane_info *plane_info, 5701 struct dc_plane_address *address, 5702 bool tmz_surface) 5703 { 5704 const struct drm_framebuffer *fb = plane_state->fb; 5705 const struct amdgpu_framebuffer *afb = 5706 to_amdgpu_framebuffer(plane_state->fb); 5707 int ret; 5708 5709 memset(plane_info, 0, sizeof(*plane_info)); 5710 5711 switch (fb->format->format) { 5712 case DRM_FORMAT_C8: 5713 plane_info->format = 5714 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; 5715 break; 5716 case DRM_FORMAT_RGB565: 5717 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; 5718 break; 5719 case DRM_FORMAT_XRGB8888: 5720 case DRM_FORMAT_ARGB8888: 5721 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 5722 break; 5723 case DRM_FORMAT_XRGB2101010: 5724 case DRM_FORMAT_ARGB2101010: 5725 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; 5726 break; 5727 case DRM_FORMAT_XBGR2101010: 5728 case DRM_FORMAT_ABGR2101010: 5729 plane_info->format = 
SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; 5730 break; 5731 case DRM_FORMAT_XBGR8888: 5732 case DRM_FORMAT_ABGR8888: 5733 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; 5734 break; 5735 case DRM_FORMAT_NV21: 5736 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; 5737 break; 5738 case DRM_FORMAT_NV12: 5739 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; 5740 break; 5741 case DRM_FORMAT_P010: 5742 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; 5743 break; 5744 case DRM_FORMAT_XRGB16161616F: 5745 case DRM_FORMAT_ARGB16161616F: 5746 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; 5747 break; 5748 case DRM_FORMAT_XBGR16161616F: 5749 case DRM_FORMAT_ABGR16161616F: 5750 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; 5751 break; 5752 case DRM_FORMAT_XRGB16161616: 5753 case DRM_FORMAT_ARGB16161616: 5754 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; 5755 break; 5756 case DRM_FORMAT_XBGR16161616: 5757 case DRM_FORMAT_ABGR16161616: 5758 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; 5759 break; 5760 default: 5761 drm_err(adev_to_drm(adev), 5762 "Unsupported screen format %p4cc\n", 5763 &fb->format->format); 5764 return -EINVAL; 5765 } 5766 5767 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 5768 case DRM_MODE_ROTATE_0: 5769 plane_info->rotation = ROTATION_ANGLE_0; 5770 break; 5771 case DRM_MODE_ROTATE_90: 5772 plane_info->rotation = ROTATION_ANGLE_90; 5773 break; 5774 case DRM_MODE_ROTATE_180: 5775 plane_info->rotation = ROTATION_ANGLE_180; 5776 break; 5777 case DRM_MODE_ROTATE_270: 5778 plane_info->rotation = ROTATION_ANGLE_270; 5779 break; 5780 default: 5781 plane_info->rotation = ROTATION_ANGLE_0; 5782 break; 5783 } 5784 5785 5786 plane_info->visible = true; 5787 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; 5788 5789 plane_info->layer_index = plane_state->normalized_zpos; 5790 5791 ret = fill_plane_color_attributes(plane_state, plane_info->format, 5792 &plane_info->color_space); 5793 if (ret) 5794 return ret; 5795 5796 ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format, 5797 plane_info->rotation, tiling_flags, 5798 &plane_info->tiling_info, 5799 &plane_info->plane_size, 5800 &plane_info->dcc, address, 5801 tmz_surface); 5802 if (ret) 5803 return ret; 5804 5805 amdgpu_dm_plane_fill_blending_from_plane_state( 5806 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha, 5807 &plane_info->global_alpha, &plane_info->global_alpha_value); 5808 5809 return 0; 5810 } 5811 5812 static int fill_dc_plane_attributes(struct amdgpu_device *adev, 5813 struct dc_plane_state *dc_plane_state, 5814 struct drm_plane_state *plane_state, 5815 struct drm_crtc_state *crtc_state) 5816 { 5817 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 5818 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb; 5819 struct dc_scaling_info scaling_info; 5820 struct dc_plane_info plane_info; 5821 int ret; 5822 5823 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info); 5824 if (ret) 5825 return ret; 5826 5827 dc_plane_state->src_rect = scaling_info.src_rect; 5828 dc_plane_state->dst_rect = scaling_info.dst_rect; 5829 dc_plane_state->clip_rect = scaling_info.clip_rect; 5830 dc_plane_state->scaling_quality = scaling_info.scaling_quality; 5831 5832 ret = fill_dc_plane_info_and_addr(adev, plane_state, 5833 afb->tiling_flags, 5834 &plane_info, 5835 &dc_plane_state->address, 5836 afb->tmz_surface); 5837 if 
(ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index;
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state,
						plane_state,
						dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static inline void fill_dc_dirty_rect(struct drm_plane *plane,
				      struct rect *dirty_rect, s32 x,
				      s32 y, s32 width, s32 height,
				      int *i, bool ffu)
{
	WARN_ON(*i >= DC_MAX_DIRTY_RECTS);

	dirty_rect->x = x;
	dirty_rect->y = y;
	dirty_rect->width = width;
	dirty_rect->height = height;

	if (ffu)
		drm_dbg(plane->dev,
			"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
			plane->base.id, width, height);
	else
		drm_dbg(plane->dev,
			"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
			plane->base.id, x, y, width, height);

	(*i)++;
}

/**
 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
 *
 * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
 *         remote fb
 * @old_plane_state: Old state of @plane
 * @new_plane_state: New state of @plane
 * @crtc_state: New state of CRTC connected to the @plane
 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
 * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled.
 *             If PSR SU is enabled and damage clips are available, only the regions of the screen
 *             that have changed will be updated. If PSR SU is not enabled,
 *             or if damage clips are not available, the entire screen will be updated.
 * @dirty_regions_changed: dirty regions changed
 *
 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
 * (referred to as "damage clips" in DRM nomenclature) that require updating on
 * the eDP remote buffer. The responsibility of specifying the dirty regions is
 * amdgpu_dm's.
 *
 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
 * plane with regions that require flushing to the eDP remote buffer. In
 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
 * implicitly provide damage clips without any client support via the plane
 * bounds.
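 *
 * As a rough sketch only - the fd, req, plane_id and the
 * fb_damage_clips_prop property id below are hypothetical and assumed to
 * have been looked up beforehand (e.g. with drmModeObjectGetProperties()) -
 * a damage-aware libdrm client could attach a single clip to an atomic
 * commit like this::
 *
 *   struct drm_mode_rect clip = {
 *           .x1 = 64, .y1 = 64, .x2 = 320, .y2 = 200,
 *   };
 *   uint32_t blob_id;
 *
 *   drmModeCreatePropertyBlob(fd, &clip, sizeof(clip), &blob_id);
 *   drmModeAtomicAddProperty(req, plane_id, fb_damage_clips_prop, blob_id);
 *   drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);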
 */
static void fill_dc_dirty_rects(struct drm_plane *plane,
				struct drm_plane_state *old_plane_state,
				struct drm_plane_state *new_plane_state,
				struct drm_crtc_state *crtc_state,
				struct dc_flip_addrs *flip_addrs,
				bool is_psr_su,
				bool *dirty_regions_changed)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct rect *dirty_rects = flip_addrs->dirty_rects;
	u32 num_clips;
	struct drm_mode_rect *clips;
	bool bb_changed;
	bool fb_changed;
	u32 i = 0;

	*dirty_regions_changed = false;

	/*
	 * Cursor plane has its own dirty rect update interface. See
	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return;

	if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
		goto ffu;

	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
	clips = drm_plane_get_damage_clips(new_plane_state);

	if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
						   is_psr_su)))
		goto ffu;

	if (!dm_crtc_state->mpo_requested) {
		if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
			goto ffu;

		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
			fill_dc_dirty_rect(new_plane_state->plane,
					   &dirty_rects[flip_addrs->dirty_rect_count],
					   clips->x1, clips->y1,
					   clips->x2 - clips->x1, clips->y2 - clips->y1,
					   &flip_addrs->dirty_rect_count,
					   false);
		return;
	}

	/*
	 * MPO is requested. Add entire plane bounding box to dirty rects if
	 * flipped to or damaged.
	 *
	 * If plane is moved or resized, also add old bounding box to dirty
	 * rects.
	 */
	fb_changed = old_plane_state->fb->base.id !=
		new_plane_state->fb->base.id;
	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
		      old_plane_state->crtc_h != new_plane_state->crtc_h);

	drm_dbg(plane->dev,
		"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
		new_plane_state->plane->base.id,
		bb_changed, fb_changed, num_clips);

	*dirty_regions_changed = bb_changed;

	if ((num_clips + (bb_changed ?
2 : 0)) > DC_MAX_DIRTY_RECTS) 5990 goto ffu; 5991 5992 if (bb_changed) { 5993 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5994 new_plane_state->crtc_x, 5995 new_plane_state->crtc_y, 5996 new_plane_state->crtc_w, 5997 new_plane_state->crtc_h, &i, false); 5998 5999 /* Add old plane bounding-box if plane is moved or resized */ 6000 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 6001 old_plane_state->crtc_x, 6002 old_plane_state->crtc_y, 6003 old_plane_state->crtc_w, 6004 old_plane_state->crtc_h, &i, false); 6005 } 6006 6007 if (num_clips) { 6008 for (; i < num_clips; clips++) 6009 fill_dc_dirty_rect(new_plane_state->plane, 6010 &dirty_rects[i], clips->x1, 6011 clips->y1, clips->x2 - clips->x1, 6012 clips->y2 - clips->y1, &i, false); 6013 } else if (fb_changed && !bb_changed) { 6014 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 6015 new_plane_state->crtc_x, 6016 new_plane_state->crtc_y, 6017 new_plane_state->crtc_w, 6018 new_plane_state->crtc_h, &i, false); 6019 } 6020 6021 flip_addrs->dirty_rect_count = i; 6022 return; 6023 6024 ffu: 6025 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0, 6026 dm_crtc_state->base.mode.crtc_hdisplay, 6027 dm_crtc_state->base.mode.crtc_vdisplay, 6028 &flip_addrs->dirty_rect_count, true); 6029 } 6030 6031 static void update_stream_scaling_settings(const struct drm_display_mode *mode, 6032 const struct dm_connector_state *dm_state, 6033 struct dc_stream_state *stream) 6034 { 6035 enum amdgpu_rmx_type rmx_type; 6036 6037 struct rect src = { 0 }; /* viewport in composition space*/ 6038 struct rect dst = { 0 }; /* stream addressable area */ 6039 6040 /* no mode. nothing to be done */ 6041 if (!mode) 6042 return; 6043 6044 /* Full screen scaling by default */ 6045 src.width = mode->hdisplay; 6046 src.height = mode->vdisplay; 6047 dst.width = stream->timing.h_addressable; 6048 dst.height = stream->timing.v_addressable; 6049 6050 if (dm_state) { 6051 rmx_type = dm_state->scaling; 6052 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { 6053 if (src.width * dst.height < 6054 src.height * dst.width) { 6055 /* height needs less upscaling/more downscaling */ 6056 dst.width = src.width * 6057 dst.height / src.height; 6058 } else { 6059 /* width needs less upscaling/more downscaling */ 6060 dst.height = src.height * 6061 dst.width / src.width; 6062 } 6063 } else if (rmx_type == RMX_CENTER) { 6064 dst = src; 6065 } 6066 6067 dst.x = (stream->timing.h_addressable - dst.width) / 2; 6068 dst.y = (stream->timing.v_addressable - dst.height) / 2; 6069 6070 if (dm_state->underscan_enable) { 6071 dst.x += dm_state->underscan_hborder / 2; 6072 dst.y += dm_state->underscan_vborder / 2; 6073 dst.width -= dm_state->underscan_hborder; 6074 dst.height -= dm_state->underscan_vborder; 6075 } 6076 } 6077 6078 stream->src = src; 6079 stream->dst = dst; 6080 6081 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n", 6082 dst.x, dst.y, dst.width, dst.height); 6083 6084 } 6085 6086 static enum dc_color_depth 6087 convert_color_depth_from_display_info(const struct drm_connector *connector, 6088 bool is_y420, int requested_bpc) 6089 { 6090 u8 bpc; 6091 6092 if (is_y420) { 6093 bpc = 8; 6094 6095 /* Cap display bpc based on HDMI 2.0 HF-VSDB */ 6096 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) 6097 bpc = 16; 6098 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) 6099 bpc = 12; 6100 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) 6101 bpc = 
10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse the color depth of
		 * EDID revisions before 1.4.
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
		       const struct drm_connector_state *connector_state)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (connector_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT601_YCC:
		if (dc_crtc_timing->flags.Y_ONLY)
			color_space = COLOR_SPACE_YCBCR601_LIMITED;
		else
			color_space = COLOR_SPACE_YCBCR601;
		break;
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		if (dc_crtc_timing->flags.Y_ONLY)
			color_space = COLOR_SPACE_YCBCR709_LIMITED;
		else
			color_space = COLOR_SPACE_YCBCR709;
		break;
	case DRM_MODE_COLORIMETRY_OPRGB:
		color_space = COLOR_SPACE_ADOBERGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
			color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
		else
			color_space = COLOR_SPACE_2020_YCBCR_LIMITED;
		break;
	case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
	default:
		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
			color_space = COLOR_SPACE_SRGB;
			if (connector_state->hdmi.broadcast_rgb == DRM_HDMI_BROADCAST_RGB_LIMITED)
				color_space = COLOR_SPACE_SRGB_LIMITED;
		/*
		 * Per the HDMI spec, a pixel clock of 27.03 MHz (270300 in
		 * 100 Hz units) is the separation point between HDTV and
		 * SDTV; use YCbCr709 above it and YCbCr601 below it.
		 */
		} else if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}
		break;
	}

	return color_space;
}

static enum display_content_type
get_output_content_type(const struct drm_connector_state *connector_state)
{
	switch (connector_state->content_type) {
	default:
	case DRM_MODE_CONTENT_TYPE_NO_DATA:
		return DISPLAY_CONTENT_TYPE_NO_DATA;
	case DRM_MODE_CONTENT_TYPE_GRAPHICS:
		return DISPLAY_CONTENT_TYPE_GRAPHICS;
	case DRM_MODE_CONTENT_TYPE_PHOTO:
		return DISPLAY_CONTENT_TYPE_PHOTO;
	case DRM_MODE_CONTENT_TYPE_CINEMA:
		return DISPLAY_CONTENT_TYPE_CINEMA;
	case DRM_MODE_CONTENT_TYPE_GAME:
		return DISPLAY_CONTENT_TYPE_GAME;
	}
}

static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/*
		 * Scale the pixel clock per the HDMI spec for the given colour
		 * depth: e.g. 10 bpc needs a clock 30/24 times the 8 bpc one.
		 */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}

static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;
	ssize_t err;

	if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
		aconnector = to_amdgpu_dm_connector(connector);

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
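		/*
		 * Carrying the old VIC and sync polarities over keeps the
		 * signalling unchanged, so the sink sees the same timing
		 * across a scaling-only change.
		 */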
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		err = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame,
							       (struct drm_connector *)connector,
							       mode_in);
		if (err < 0)
			drm_warn_once(connector->dev, "Failed to set up AVI infoframe on connector %s: %zd\n", connector->name, err);
		timing_out->vic = avi_frame.video_code;
		err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame,
								  (struct drm_connector *)connector,
								  mode_in);
		if (err < 0)
			drm_warn_once(connector->dev, "Failed to set up vendor infoframe on connector %s: %zd\n", connector->name, err);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	if (aconnector && is_freesync_video_mode(mode_in, aconnector)) {
		timing_out->h_addressable = mode_in->hdisplay;
		timing_out->h_total = mode_in->htotal;
		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
		timing_out->v_total = mode_in->vtotal;
		timing_out->v_addressable = mode_in->vdisplay;
		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
		timing_out->pix_clk_100hz = mode_in->clock * 10;
	} else {
		timing_out->h_addressable = mode_in->crtc_hdisplay;
		timing_out->h_total = mode_in->crtc_htotal;
		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
		timing_out->v_total = mode_in->crtc_vtotal;
		timing_out->v_addressable = mode_in->crtc_vdisplay;
		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	}

	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->out_transfer_func.type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}

	stream->output_color_space = get_output_color_space(timing_out, connector_state);
	stream->content_type = get_output_content_type(connector_state);
}

static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;
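	/*
	 * Short Audio Descriptors are only defined for CEA-861 extension
	 * blocks of revision 3 or newer, hence the revision check below
	 * before the audio modes parsed from the EDID are copied over.
	 */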
6394 6395 strscpy(audio_info->display_name, 6396 edid_caps->display_name, 6397 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 6398 6399 if (cea_revision >= 3) { 6400 audio_info->mode_count = edid_caps->audio_mode_count; 6401 6402 for (i = 0; i < audio_info->mode_count; ++i) { 6403 audio_info->modes[i].format_code = 6404 (enum audio_format_code) 6405 (edid_caps->audio_modes[i].format_code); 6406 audio_info->modes[i].channel_count = 6407 edid_caps->audio_modes[i].channel_count; 6408 audio_info->modes[i].sample_rates.all = 6409 edid_caps->audio_modes[i].sample_rate; 6410 audio_info->modes[i].sample_size = 6411 edid_caps->audio_modes[i].sample_size; 6412 } 6413 } 6414 6415 audio_info->flags.all = edid_caps->speaker_flags; 6416 6417 /* TODO: We only check for the progressive mode, check for interlace mode too */ 6418 if (drm_connector->latency_present[0]) { 6419 audio_info->video_latency = drm_connector->video_latency[0]; 6420 audio_info->audio_latency = drm_connector->audio_latency[0]; 6421 } 6422 6423 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ 6424 6425 } 6426 6427 static void 6428 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, 6429 struct drm_display_mode *dst_mode) 6430 { 6431 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; 6432 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; 6433 dst_mode->crtc_clock = src_mode->crtc_clock; 6434 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; 6435 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; 6436 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; 6437 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; 6438 dst_mode->crtc_htotal = src_mode->crtc_htotal; 6439 dst_mode->crtc_hskew = src_mode->crtc_hskew; 6440 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; 6441 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end; 6442 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; 6443 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; 6444 dst_mode->crtc_vtotal = src_mode->crtc_vtotal; 6445 } 6446 6447 static void 6448 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, 6449 const struct drm_display_mode *native_mode, 6450 bool scale_enabled) 6451 { 6452 if (scale_enabled || ( 6453 native_mode->clock == drm_mode->clock && 6454 native_mode->htotal == drm_mode->htotal && 6455 native_mode->vtotal == drm_mode->vtotal)) { 6456 if (native_mode->crtc_clock) 6457 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 6458 } else { 6459 /* no scaling nor amdgpu inserted, no need to patch */ 6460 } 6461 } 6462 6463 static struct dc_sink * 6464 create_fake_sink(struct drm_device *dev, struct dc_link *link) 6465 { 6466 struct dc_sink_init_data sink_init_data = { 0 }; 6467 struct dc_sink *sink = NULL; 6468 6469 sink_init_data.link = link; 6470 sink_init_data.sink_signal = link->connector_signal; 6471 6472 sink = dc_sink_create(&sink_init_data); 6473 if (!sink) { 6474 drm_err(dev, "Failed to create sink!\n"); 6475 return NULL; 6476 } 6477 sink->sink_signal = SIGNAL_TYPE_VIRTUAL; 6478 6479 return sink; 6480 } 6481 6482 static void set_multisync_trigger_params( 6483 struct dc_stream_state *stream) 6484 { 6485 struct dc_stream_state *master = NULL; 6486 6487 if (stream->triggered_crtc_reset.enabled) { 6488 master = stream->triggered_crtc_reset.event_source; 6489 stream->triggered_crtc_reset.event = 6490 master->timing.flags.VSYNC_POSITIVE_POLARITY ? 
			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}

/**
 * DOC: FreeSync Video
 *
 * When a userspace application wants to play a video, the content follows a
 * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
 *
 * - TV/NTSC (23.976 FPS)
 * - Cinema (24 FPS)
 * - TV/PAL (25 FPS)
 * - TV/NTSC (29.97 FPS)
 * - TV/NTSC (30 FPS)
 * - Cinema HFR (48 FPS)
 * - TV/PAL (50 FPS)
 * - Commonly used (60 FPS)
 * - Multiples of 24 (48,72,96 FPS)
 *
 * The list of standard video formats is not huge and can be added to the
 * connector's modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch happens seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid any visible blink
 * during the transition. For example, the video player can change the
 * modesetting from 60Hz to 30Hz for playing TV/NTSC content when it goes
 * full screen without causing any display blink. This same concept can be
 * applied to a mode setting change, as the sketch following this comment
 * illustrates.
 */
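/*
 * A minimal sketch of the front-porch arithmetic behind such a seamless
 * switch. Illustrative only: the numbers assume the standard CEA 1080p60
 * timing, and DC performs the equivalent computation internally.
 *
 *   refresh = pixel_clock / (htotal * vtotal)
 *
 *   1920x1080@60: clock = 148500 kHz, htotal = 2200, vtotal = 1125
 *     148500000 / (2200 * 1125) = 60 Hz
 *
 *   To reach 30 Hz without touching the clock or the horizontal timing,
 *   only vtotal grows, entirely in the vertical front porch:
 *     vtotal' = 148500000 / (2200 * 30) = 2250
 *
 * This is the exact shape is_freesync_video_mode() checks for: a candidate
 * mode must match the base mode in everything except vtotal and the vsync
 * offsets.
 */
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			      bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?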
6585 &aconnector->base.probed_modes : 6586 &aconnector->base.modes; 6587 6588 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 6589 return NULL; 6590 6591 if (aconnector->freesync_vid_base.clock != 0) 6592 return &aconnector->freesync_vid_base; 6593 6594 /* Find the preferred mode */ 6595 list_for_each_entry(m, list_head, head) { 6596 if (m->type & DRM_MODE_TYPE_PREFERRED) { 6597 m_pref = m; 6598 break; 6599 } 6600 } 6601 6602 if (!m_pref) { 6603 /* Probably an EDID with no preferred mode. Fallback to first entry */ 6604 m_pref = list_first_entry_or_null( 6605 &aconnector->base.modes, struct drm_display_mode, head); 6606 if (!m_pref) { 6607 drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n"); 6608 return NULL; 6609 } 6610 } 6611 6612 highest_refresh = drm_mode_vrefresh(m_pref); 6613 6614 /* 6615 * Find the mode with highest refresh rate with same resolution. 6616 * For some monitors, preferred mode is not the mode with highest 6617 * supported refresh rate. 6618 */ 6619 list_for_each_entry(m, list_head, head) { 6620 current_refresh = drm_mode_vrefresh(m); 6621 6622 if (m->hdisplay == m_pref->hdisplay && 6623 m->vdisplay == m_pref->vdisplay && 6624 highest_refresh < current_refresh) { 6625 highest_refresh = current_refresh; 6626 m_pref = m; 6627 } 6628 } 6629 6630 drm_mode_copy(&aconnector->freesync_vid_base, m_pref); 6631 return m_pref; 6632 } 6633 6634 static bool is_freesync_video_mode(const struct drm_display_mode *mode, 6635 struct amdgpu_dm_connector *aconnector) 6636 { 6637 struct drm_display_mode *high_mode; 6638 int timing_diff; 6639 6640 high_mode = get_highest_refresh_rate_mode(aconnector, false); 6641 if (!high_mode || !mode) 6642 return false; 6643 6644 timing_diff = high_mode->vtotal - mode->vtotal; 6645 6646 if (high_mode->clock == 0 || high_mode->clock != mode->clock || 6647 high_mode->hdisplay != mode->hdisplay || 6648 high_mode->vdisplay != mode->vdisplay || 6649 high_mode->hsync_start != mode->hsync_start || 6650 high_mode->hsync_end != mode->hsync_end || 6651 high_mode->htotal != mode->htotal || 6652 high_mode->hskew != mode->hskew || 6653 high_mode->vscan != mode->vscan || 6654 high_mode->vsync_start - mode->vsync_start != timing_diff || 6655 high_mode->vsync_end - mode->vsync_end != timing_diff) 6656 return false; 6657 else 6658 return true; 6659 } 6660 6661 #if defined(CONFIG_DRM_AMD_DC_FP) 6662 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, 6663 struct dc_sink *sink, struct dc_stream_state *stream, 6664 struct dsc_dec_dpcd_caps *dsc_caps) 6665 { 6666 stream->timing.flags.DSC = 0; 6667 dsc_caps->is_dsc_supported = false; 6668 6669 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || 6670 sink->sink_signal == SIGNAL_TYPE_EDP)) { 6671 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || 6672 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) 6673 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, 6674 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 6675 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, 6676 dsc_caps); 6677 } 6678 } 6679 6680 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, 6681 struct dc_sink *sink, struct dc_stream_state *stream, 6682 struct dsc_dec_dpcd_caps *dsc_caps, 6683 uint32_t max_dsc_target_bpp_limit_override) 6684 { 6685 const struct dc_link_settings *verified_link_cap = NULL; 6686 u32 link_bw_in_kbps; 6687 u32 edp_min_bpp_x16, edp_max_bpp_x16; 6688 struct dc *dc = 
sink->ctx->dc; 6689 struct dc_dsc_bw_range bw_range = {0}; 6690 struct dc_dsc_config dsc_cfg = {0}; 6691 struct dc_dsc_config_options dsc_options = {0}; 6692 6693 dc_dsc_get_default_config_option(dc, &dsc_options); 6694 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 6695 6696 verified_link_cap = dc_link_get_link_cap(stream->link); 6697 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); 6698 edp_min_bpp_x16 = 8 * 16; 6699 edp_max_bpp_x16 = 8 * 16; 6700 6701 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) 6702 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; 6703 6704 if (edp_max_bpp_x16 < edp_min_bpp_x16) 6705 edp_min_bpp_x16 = edp_max_bpp_x16; 6706 6707 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], 6708 dc->debug.dsc_min_slice_height_override, 6709 edp_min_bpp_x16, edp_max_bpp_x16, 6710 dsc_caps, 6711 &stream->timing, 6712 dc_link_get_highest_encoding_format(aconnector->dc_link), 6713 &bw_range)) { 6714 6715 if (bw_range.max_kbps < link_bw_in_kbps) { 6716 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 6717 dsc_caps, 6718 &dsc_options, 6719 0, 6720 &stream->timing, 6721 dc_link_get_highest_encoding_format(aconnector->dc_link), 6722 &dsc_cfg)) { 6723 stream->timing.dsc_cfg = dsc_cfg; 6724 stream->timing.flags.DSC = 1; 6725 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; 6726 } 6727 return; 6728 } 6729 } 6730 6731 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 6732 dsc_caps, 6733 &dsc_options, 6734 link_bw_in_kbps, 6735 &stream->timing, 6736 dc_link_get_highest_encoding_format(aconnector->dc_link), 6737 &dsc_cfg)) { 6738 stream->timing.dsc_cfg = dsc_cfg; 6739 stream->timing.flags.DSC = 1; 6740 } 6741 } 6742 6743 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, 6744 struct dc_sink *sink, struct dc_stream_state *stream, 6745 struct dsc_dec_dpcd_caps *dsc_caps) 6746 { 6747 struct drm_connector *drm_connector = &aconnector->base; 6748 u32 link_bandwidth_kbps; 6749 struct dc *dc = sink->ctx->dc; 6750 u32 max_supported_bw_in_kbps, timing_bw_in_kbps; 6751 u32 dsc_max_supported_bw_in_kbps; 6752 u32 max_dsc_target_bpp_limit_override = 6753 drm_connector->display_info.max_dsc_bpp; 6754 struct dc_dsc_config_options dsc_options = {0}; 6755 6756 dc_dsc_get_default_config_option(dc, &dsc_options); 6757 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 6758 6759 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, 6760 dc_link_get_link_cap(aconnector->dc_link)); 6761 6762 /* Set DSC policy according to dsc_clock_en */ 6763 dc_dsc_policy_set_enable_dsc_when_not_needed( 6764 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); 6765 6766 if (sink->sink_signal == SIGNAL_TYPE_EDP && 6767 !aconnector->dc_link->panel_config.dsc.disable_dsc_edp && 6768 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { 6769 6770 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); 6771 6772 } else if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { 6773 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { 6774 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 6775 dsc_caps, 6776 &dsc_options, 6777 link_bandwidth_kbps, 6778 &stream->timing, 6779 dc_link_get_highest_encoding_format(aconnector->dc_link), 6780 &stream->timing.dsc_cfg)) { 6781 stream->timing.flags.DSC = 1; 6782 drm_dbg_driver(drm_connector->dev, "%s: 
SST_DSC [%s] DSC is selected from SST RX\n", 6783 __func__, drm_connector->name); 6784 } 6785 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { 6786 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, 6787 dc_link_get_highest_encoding_format(aconnector->dc_link)); 6788 max_supported_bw_in_kbps = link_bandwidth_kbps; 6789 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; 6790 6791 if (timing_bw_in_kbps > max_supported_bw_in_kbps && 6792 max_supported_bw_in_kbps > 0 && 6793 dsc_max_supported_bw_in_kbps > 0) 6794 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 6795 dsc_caps, 6796 &dsc_options, 6797 dsc_max_supported_bw_in_kbps, 6798 &stream->timing, 6799 dc_link_get_highest_encoding_format(aconnector->dc_link), 6800 &stream->timing.dsc_cfg)) { 6801 stream->timing.flags.DSC = 1; 6802 drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n", 6803 __func__, drm_connector->name); 6804 } 6805 } 6806 } 6807 6808 /* Overwrite the stream flag if DSC is enabled through debugfs */ 6809 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) 6810 stream->timing.flags.DSC = 1; 6811 6812 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h) 6813 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; 6814 6815 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) 6816 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; 6817 6818 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) 6819 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; 6820 } 6821 #endif 6822 6823 static struct dc_stream_state * 6824 create_stream_for_sink(struct drm_connector *connector, 6825 const struct drm_display_mode *drm_mode, 6826 const struct dm_connector_state *dm_state, 6827 const struct dc_stream_state *old_stream, 6828 int requested_bpc) 6829 { 6830 struct drm_device *dev = connector->dev; 6831 struct amdgpu_dm_connector *aconnector = NULL; 6832 struct drm_display_mode *preferred_mode = NULL; 6833 const struct drm_connector_state *con_state = &dm_state->base; 6834 struct dc_stream_state *stream = NULL; 6835 struct drm_display_mode mode; 6836 struct drm_display_mode saved_mode; 6837 struct drm_display_mode *freesync_mode = NULL; 6838 bool native_mode_found = false; 6839 bool recalculate_timing = false; 6840 bool scale = dm_state->scaling != RMX_OFF; 6841 int mode_refresh; 6842 int preferred_refresh = 0; 6843 enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; 6844 #if defined(CONFIG_DRM_AMD_DC_FP) 6845 struct dsc_dec_dpcd_caps dsc_caps; 6846 #endif 6847 struct dc_link *link = NULL; 6848 struct dc_sink *sink = NULL; 6849 6850 drm_mode_init(&mode, drm_mode); 6851 memset(&saved_mode, 0, sizeof(saved_mode)); 6852 6853 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) { 6854 aconnector = NULL; 6855 aconnector = to_amdgpu_dm_connector(connector); 6856 link = aconnector->dc_link; 6857 } else { 6858 struct drm_writeback_connector *wbcon = NULL; 6859 struct amdgpu_dm_wb_connector *dm_wbcon = NULL; 6860 6861 wbcon = drm_connector_to_writeback(connector); 6862 dm_wbcon = to_amdgpu_dm_wb_connector(wbcon); 6863 link = dm_wbcon->link; 6864 } 6865 6866 if (!aconnector || !aconnector->dc_sink) { 6867 sink = create_fake_sink(dev, link); 6868 if (!sink) 6869 return stream; 6870 6871 } else { 6872 sink = aconnector->dc_sink; 6873 dc_sink_retain(sink); 
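		/* Matched by the dc_sink_release() at the "finish" label below. */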
6874 } 6875 6876 stream = dc_create_stream_for_sink(sink); 6877 6878 if (stream == NULL) { 6879 drm_err(dev, "Failed to create stream for sink!\n"); 6880 goto finish; 6881 } 6882 6883 /* We leave this NULL for writeback connectors */ 6884 stream->dm_stream_context = aconnector; 6885 6886 stream->timing.flags.LTE_340MCSC_SCRAMBLE = 6887 connector->display_info.hdmi.scdc.scrambling.low_rates; 6888 6889 list_for_each_entry(preferred_mode, &connector->modes, head) { 6890 /* Search for preferred mode */ 6891 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { 6892 native_mode_found = true; 6893 break; 6894 } 6895 } 6896 if (!native_mode_found) 6897 preferred_mode = list_first_entry_or_null( 6898 &connector->modes, 6899 struct drm_display_mode, 6900 head); 6901 6902 mode_refresh = drm_mode_vrefresh(&mode); 6903 6904 if (preferred_mode == NULL) { 6905 /* 6906 * This may not be an error, the use case is when we have no 6907 * usermode calls to reset and set mode upon hotplug. In this 6908 * case, we call set mode ourselves to restore the previous mode 6909 * and the modelist may not be filled in time. 6910 */ 6911 drm_dbg_driver(dev, "No preferred mode found\n"); 6912 } else if (aconnector) { 6913 recalculate_timing = amdgpu_freesync_vid_mode && 6914 is_freesync_video_mode(&mode, aconnector); 6915 if (recalculate_timing) { 6916 freesync_mode = get_highest_refresh_rate_mode(aconnector, false); 6917 drm_mode_copy(&saved_mode, &mode); 6918 saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio; 6919 drm_mode_copy(&mode, freesync_mode); 6920 mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio; 6921 } else { 6922 decide_crtc_timing_for_drm_display_mode( 6923 &mode, preferred_mode, scale); 6924 6925 preferred_refresh = drm_mode_vrefresh(preferred_mode); 6926 } 6927 } 6928 6929 if (recalculate_timing) 6930 drm_mode_set_crtcinfo(&saved_mode, 0); 6931 6932 /* 6933 * If scaling is enabled and refresh rate didn't change 6934 * we copy the vic and polarities of the old timings 6935 */ 6936 if (!scale || mode_refresh != preferred_refresh) 6937 fill_stream_properties_from_drm_display_mode( 6938 stream, &mode, connector, con_state, NULL, 6939 requested_bpc); 6940 else 6941 fill_stream_properties_from_drm_display_mode( 6942 stream, &mode, connector, con_state, old_stream, 6943 requested_bpc); 6944 6945 /* The rest isn't needed for writeback connectors */ 6946 if (!aconnector) 6947 goto finish; 6948 6949 if (aconnector->timing_changed) { 6950 drm_dbg(aconnector->base.dev, 6951 "overriding timing for automated test, bpc %d, changing to %d\n", 6952 stream->timing.display_color_depth, 6953 aconnector->timing_requested->display_color_depth); 6954 stream->timing = *aconnector->timing_requested; 6955 } 6956 6957 #if defined(CONFIG_DRM_AMD_DC_FP) 6958 /* SST DSC determination policy */ 6959 update_dsc_caps(aconnector, sink, stream, &dsc_caps); 6960 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) 6961 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps); 6962 #endif 6963 6964 update_stream_scaling_settings(&mode, dm_state, stream); 6965 6966 fill_audio_info( 6967 &stream->audio_info, 6968 connector, 6969 sink); 6970 6971 update_stream_signal(stream, sink); 6972 6973 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6974 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); 6975 6976 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || 6977 stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || 6978 stream->signal == SIGNAL_TYPE_EDP) { 6979 const 
struct dc_edid_caps *edid_caps; 6980 unsigned int disable_colorimetry = 0; 6981 6982 if (aconnector->dc_sink) { 6983 edid_caps = &aconnector->dc_sink->edid_caps; 6984 disable_colorimetry = edid_caps->panel_patch.disable_colorimetry; 6985 } 6986 6987 // 6988 // should decide stream support vsc sdp colorimetry capability 6989 // before building vsc info packet 6990 // 6991 stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && 6992 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && 6993 !disable_colorimetry; 6994 6995 if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) 6996 tf = TRANSFER_FUNC_GAMMA_22; 6997 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); 6998 aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; 6999 7000 } 7001 finish: 7002 dc_sink_release(sink); 7003 7004 return stream; 7005 } 7006 7007 static enum drm_connector_status 7008 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) 7009 { 7010 bool connected; 7011 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7012 7013 /* 7014 * Notes: 7015 * 1. This interface is NOT called in context of HPD irq. 7016 * 2. This interface *is called* in context of user-mode ioctl. Which 7017 * makes it a bad place for *any* MST-related activity. 7018 */ 7019 7020 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && 7021 !aconnector->fake_enable) 7022 connected = (aconnector->dc_sink != NULL); 7023 else 7024 connected = (aconnector->base.force == DRM_FORCE_ON || 7025 aconnector->base.force == DRM_FORCE_ON_DIGITAL); 7026 7027 update_subconnector_property(aconnector); 7028 7029 return (connected ? connector_status_connected : 7030 connector_status_disconnected); 7031 } 7032 7033 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, 7034 struct drm_connector_state *connector_state, 7035 struct drm_property *property, 7036 uint64_t val) 7037 { 7038 struct drm_device *dev = connector->dev; 7039 struct amdgpu_device *adev = drm_to_adev(dev); 7040 struct dm_connector_state *dm_old_state = 7041 to_dm_connector_state(connector->state); 7042 struct dm_connector_state *dm_new_state = 7043 to_dm_connector_state(connector_state); 7044 7045 int ret = -EINVAL; 7046 7047 if (property == dev->mode_config.scaling_mode_property) { 7048 enum amdgpu_rmx_type rmx_type; 7049 7050 switch (val) { 7051 case DRM_MODE_SCALE_CENTER: 7052 rmx_type = RMX_CENTER; 7053 break; 7054 case DRM_MODE_SCALE_ASPECT: 7055 rmx_type = RMX_ASPECT; 7056 break; 7057 case DRM_MODE_SCALE_FULLSCREEN: 7058 rmx_type = RMX_FULL; 7059 break; 7060 case DRM_MODE_SCALE_NONE: 7061 default: 7062 rmx_type = RMX_OFF; 7063 break; 7064 } 7065 7066 if (dm_old_state->scaling == rmx_type) 7067 return 0; 7068 7069 dm_new_state->scaling = rmx_type; 7070 ret = 0; 7071 } else if (property == adev->mode_info.underscan_hborder_property) { 7072 dm_new_state->underscan_hborder = val; 7073 ret = 0; 7074 } else if (property == adev->mode_info.underscan_vborder_property) { 7075 dm_new_state->underscan_vborder = val; 7076 ret = 0; 7077 } else if (property == adev->mode_info.underscan_property) { 7078 dm_new_state->underscan_enable = val; 7079 ret = 0; 7080 } 7081 7082 return ret; 7083 } 7084 7085 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, 7086 const struct drm_connector_state *state, 7087 struct drm_property *property, 7088 uint64_t *val) 7089 { 7090 struct drm_device *dev = connector->dev; 7091 struct 
amdgpu_device *adev = drm_to_adev(dev); 7092 struct dm_connector_state *dm_state = 7093 to_dm_connector_state(state); 7094 int ret = -EINVAL; 7095 7096 if (property == dev->mode_config.scaling_mode_property) { 7097 switch (dm_state->scaling) { 7098 case RMX_CENTER: 7099 *val = DRM_MODE_SCALE_CENTER; 7100 break; 7101 case RMX_ASPECT: 7102 *val = DRM_MODE_SCALE_ASPECT; 7103 break; 7104 case RMX_FULL: 7105 *val = DRM_MODE_SCALE_FULLSCREEN; 7106 break; 7107 case RMX_OFF: 7108 default: 7109 *val = DRM_MODE_SCALE_NONE; 7110 break; 7111 } 7112 ret = 0; 7113 } else if (property == adev->mode_info.underscan_hborder_property) { 7114 *val = dm_state->underscan_hborder; 7115 ret = 0; 7116 } else if (property == adev->mode_info.underscan_vborder_property) { 7117 *val = dm_state->underscan_vborder; 7118 ret = 0; 7119 } else if (property == adev->mode_info.underscan_property) { 7120 *val = dm_state->underscan_enable; 7121 ret = 0; 7122 } 7123 7124 return ret; 7125 } 7126 7127 /** 7128 * DOC: panel power savings 7129 * 7130 * The display manager allows you to set your desired **panel power savings** 7131 * level (between 0-4, with 0 representing off), e.g. using the following:: 7132 * 7133 * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings 7134 * 7135 * Modifying this value can have implications on color accuracy, so tread 7136 * carefully. 7137 */ 7138 7139 static ssize_t panel_power_savings_show(struct device *device, 7140 struct device_attribute *attr, 7141 char *buf) 7142 { 7143 struct drm_connector *connector = dev_get_drvdata(device); 7144 struct drm_device *dev = connector->dev; 7145 u8 val; 7146 7147 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 7148 val = to_dm_connector_state(connector->state)->abm_level == 7149 ABM_LEVEL_IMMEDIATE_DISABLE ? 
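/* report 0 (off) to userspace when ABM is force-disabled */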
0 : 7150 to_dm_connector_state(connector->state)->abm_level; 7151 drm_modeset_unlock(&dev->mode_config.connection_mutex); 7152 7153 return sysfs_emit(buf, "%u\n", val); 7154 } 7155 7156 static ssize_t panel_power_savings_store(struct device *device, 7157 struct device_attribute *attr, 7158 const char *buf, size_t count) 7159 { 7160 struct drm_connector *connector = dev_get_drvdata(device); 7161 struct drm_device *dev = connector->dev; 7162 long val; 7163 int ret; 7164 7165 ret = kstrtol(buf, 0, &val); 7166 7167 if (ret) 7168 return ret; 7169 7170 if (val < 0 || val > 4) 7171 return -EINVAL; 7172 7173 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 7174 to_dm_connector_state(connector->state)->abm_level = val ?: 7175 ABM_LEVEL_IMMEDIATE_DISABLE; 7176 drm_modeset_unlock(&dev->mode_config.connection_mutex); 7177 7178 drm_kms_helper_hotplug_event(dev); 7179 7180 return count; 7181 } 7182 7183 static DEVICE_ATTR_RW(panel_power_savings); 7184 7185 static struct attribute *amdgpu_attrs[] = { 7186 &dev_attr_panel_power_savings.attr, 7187 NULL 7188 }; 7189 7190 static const struct attribute_group amdgpu_group = { 7191 .name = "amdgpu", 7192 .attrs = amdgpu_attrs 7193 }; 7194 7195 static bool 7196 amdgpu_dm_should_create_sysfs(struct amdgpu_dm_connector *amdgpu_dm_connector) 7197 { 7198 if (amdgpu_dm_abm_level >= 0) 7199 return false; 7200 7201 if (amdgpu_dm_connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) 7202 return false; 7203 7204 /* check for OLED panels */ 7205 if (amdgpu_dm_connector->bl_idx >= 0) { 7206 struct drm_device *drm = amdgpu_dm_connector->base.dev; 7207 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; 7208 struct amdgpu_dm_backlight_caps *caps; 7209 7210 caps = &dm->backlight_caps[amdgpu_dm_connector->bl_idx]; 7211 if (caps->aux_support) 7212 return false; 7213 } 7214 7215 return true; 7216 } 7217 7218 static void amdgpu_dm_connector_unregister(struct drm_connector *connector) 7219 { 7220 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 7221 7222 if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) 7223 sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group); 7224 7225 cec_notifier_conn_unregister(amdgpu_dm_connector->notifier); 7226 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); 7227 } 7228 7229 static void amdgpu_dm_connector_destroy(struct drm_connector *connector) 7230 { 7231 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7232 struct amdgpu_device *adev = drm_to_adev(connector->dev); 7233 struct amdgpu_display_manager *dm = &adev->dm; 7234 7235 /* 7236 * Call only if mst_mgr was initialized before since it's not done 7237 * for all connector types. 
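 * (The MST topology manager is only set up for DisplayPort
 * connectors, see amdgpu_dm_initialize_dp_connector().)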
7238 */ 7239 if (aconnector->mst_mgr.dev) 7240 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); 7241 7242 if (aconnector->bl_idx != -1) { 7243 backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]); 7244 dm->backlight_dev[aconnector->bl_idx] = NULL; 7245 } 7246 7247 if (aconnector->dc_em_sink) 7248 dc_sink_release(aconnector->dc_em_sink); 7249 aconnector->dc_em_sink = NULL; 7250 if (aconnector->dc_sink) 7251 dc_sink_release(aconnector->dc_sink); 7252 aconnector->dc_sink = NULL; 7253 7254 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 7255 drm_connector_unregister(connector); 7256 drm_connector_cleanup(connector); 7257 if (aconnector->i2c) { 7258 i2c_del_adapter(&aconnector->i2c->base); 7259 kfree(aconnector->i2c); 7260 } 7261 kfree(aconnector->dm_dp_aux.aux.name); 7262 7263 kfree(connector); 7264 } 7265 7266 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) 7267 { 7268 struct dm_connector_state *state = 7269 to_dm_connector_state(connector->state); 7270 7271 if (connector->state) 7272 __drm_atomic_helper_connector_destroy_state(connector->state); 7273 7274 kfree(state); 7275 7276 state = kzalloc(sizeof(*state), GFP_KERNEL); 7277 7278 if (state) { 7279 state->scaling = RMX_OFF; 7280 state->underscan_enable = false; 7281 state->underscan_hborder = 0; 7282 state->underscan_vborder = 0; 7283 state->base.max_requested_bpc = 8; 7284 state->vcpi_slots = 0; 7285 state->pbn = 0; 7286 7287 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 7288 if (amdgpu_dm_abm_level <= 0) 7289 state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE; 7290 else 7291 state->abm_level = amdgpu_dm_abm_level; 7292 } 7293 7294 __drm_atomic_helper_connector_reset(connector, &state->base); 7295 } 7296 } 7297 7298 struct drm_connector_state * 7299 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) 7300 { 7301 struct dm_connector_state *state = 7302 to_dm_connector_state(connector->state); 7303 7304 struct dm_connector_state *new_state = 7305 kmemdup(state, sizeof(*state), GFP_KERNEL); 7306 7307 if (!new_state) 7308 return NULL; 7309 7310 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); 7311 7312 new_state->freesync_capable = state->freesync_capable; 7313 new_state->abm_level = state->abm_level; 7314 new_state->scaling = state->scaling; 7315 new_state->underscan_enable = state->underscan_enable; 7316 new_state->underscan_hborder = state->underscan_hborder; 7317 new_state->underscan_vborder = state->underscan_vborder; 7318 new_state->vcpi_slots = state->vcpi_slots; 7319 new_state->pbn = state->pbn; 7320 return &new_state->base; 7321 } 7322 7323 static int 7324 amdgpu_dm_connector_late_register(struct drm_connector *connector) 7325 { 7326 struct amdgpu_dm_connector *amdgpu_dm_connector = 7327 to_amdgpu_dm_connector(connector); 7328 int r; 7329 7330 if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) { 7331 r = sysfs_create_group(&connector->kdev->kobj, 7332 &amdgpu_group); 7333 if (r) 7334 return r; 7335 } 7336 7337 amdgpu_dm_register_backlight_device(amdgpu_dm_connector); 7338 7339 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 7340 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 7341 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; 7342 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); 7343 if (r) 7344 return r; 7345 } 7346 7347 #if defined(CONFIG_DEBUG_FS) 7348 connector_debugfs_init(amdgpu_dm_connector); 7349 #endif 7350 7351 return 0; 7352 } 7353 7354 static void 
amdgpu_dm_connector_funcs_force(struct drm_connector *connector) 7355 { 7356 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7357 struct dc_link *dc_link = aconnector->dc_link; 7358 struct dc_sink *dc_em_sink = aconnector->dc_em_sink; 7359 const struct drm_edid *drm_edid; 7360 struct i2c_adapter *ddc; 7361 struct drm_device *dev = connector->dev; 7362 7363 if (dc_link && dc_link->aux_mode) 7364 ddc = &aconnector->dm_dp_aux.aux.ddc; 7365 else 7366 ddc = &aconnector->i2c->base; 7367 7368 drm_edid = drm_edid_read_ddc(connector, ddc); 7369 drm_edid_connector_update(connector, drm_edid); 7370 if (!drm_edid) { 7371 drm_err(dev, "No EDID found on connector: %s.\n", connector->name); 7372 return; 7373 } 7374 7375 aconnector->drm_edid = drm_edid; 7376 /* Update emulated (virtual) sink's EDID */ 7377 if (dc_em_sink && dc_link) { 7378 // FIXME: Get rid of drm_edid_raw() 7379 const struct edid *edid = drm_edid_raw(drm_edid); 7380 7381 memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); 7382 memmove(dc_em_sink->dc_edid.raw_edid, edid, 7383 (edid->extensions + 1) * EDID_LENGTH); 7384 dm_helpers_parse_edid_caps( 7385 dc_link, 7386 &dc_em_sink->dc_edid, 7387 &dc_em_sink->edid_caps); 7388 } 7389 } 7390 7391 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { 7392 .reset = amdgpu_dm_connector_funcs_reset, 7393 .detect = amdgpu_dm_connector_detect, 7394 .fill_modes = drm_helper_probe_single_connector_modes, 7395 .destroy = amdgpu_dm_connector_destroy, 7396 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 7397 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 7398 .atomic_set_property = amdgpu_dm_connector_atomic_set_property, 7399 .atomic_get_property = amdgpu_dm_connector_atomic_get_property, 7400 .late_register = amdgpu_dm_connector_late_register, 7401 .early_unregister = amdgpu_dm_connector_unregister, 7402 .force = amdgpu_dm_connector_funcs_force 7403 }; 7404 7405 static int get_modes(struct drm_connector *connector) 7406 { 7407 return amdgpu_dm_connector_get_modes(connector); 7408 } 7409 7410 static void create_eml_sink(struct amdgpu_dm_connector *aconnector) 7411 { 7412 struct drm_connector *connector = &aconnector->base; 7413 struct dc_link *dc_link = aconnector->dc_link; 7414 struct dc_sink_init_data init_params = { 7415 .link = aconnector->dc_link, 7416 .sink_signal = SIGNAL_TYPE_VIRTUAL 7417 }; 7418 const struct drm_edid *drm_edid; 7419 const struct edid *edid; 7420 struct i2c_adapter *ddc; 7421 7422 if (dc_link && dc_link->aux_mode) 7423 ddc = &aconnector->dm_dp_aux.aux.ddc; 7424 else 7425 ddc = &aconnector->i2c->base; 7426 7427 drm_edid = drm_edid_read_ddc(connector, ddc); 7428 drm_edid_connector_update(connector, drm_edid); 7429 if (!drm_edid) { 7430 drm_err(connector->dev, "No EDID found on connector: %s.\n", connector->name); 7431 return; 7432 } 7433 7434 if (connector->display_info.is_hdmi) 7435 init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; 7436 7437 aconnector->drm_edid = drm_edid; 7438 7439 edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() 7440 aconnector->dc_em_sink = dc_link_add_remote_sink( 7441 aconnector->dc_link, 7442 (uint8_t *)edid, 7443 (edid->extensions + 1) * EDID_LENGTH, 7444 &init_params); 7445 7446 if (aconnector->base.force == DRM_FORCE_ON) { 7447 aconnector->dc_sink = aconnector->dc_link->local_sink ? 
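/* prefer the physically attached sink over the emulated one */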
7448 aconnector->dc_link->local_sink : 7449 aconnector->dc_em_sink; 7450 if (aconnector->dc_sink) 7451 dc_sink_retain(aconnector->dc_sink); 7452 } 7453 } 7454 7455 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) 7456 { 7457 struct dc_link *link = (struct dc_link *)aconnector->dc_link; 7458 7459 /* 7460 * In case of headless boot with force on for DP managed connector 7461 * Those settings have to be != 0 to get initial modeset 7462 */ 7463 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { 7464 link->verified_link_cap.lane_count = LANE_COUNT_FOUR; 7465 link->verified_link_cap.link_rate = LINK_RATE_HIGH2; 7466 } 7467 7468 create_eml_sink(aconnector); 7469 } 7470 7471 static enum dc_status dm_validate_stream_and_context(struct dc *dc, 7472 struct dc_stream_state *stream) 7473 { 7474 enum dc_status dc_result = DC_ERROR_UNEXPECTED; 7475 struct dc_plane_state *dc_plane_state = NULL; 7476 struct dc_state *dc_state = NULL; 7477 7478 if (!stream) 7479 goto cleanup; 7480 7481 dc_plane_state = dc_create_plane_state(dc); 7482 if (!dc_plane_state) 7483 goto cleanup; 7484 7485 dc_state = dc_state_create(dc, NULL); 7486 if (!dc_state) 7487 goto cleanup; 7488 7489 /* populate stream to plane */ 7490 dc_plane_state->src_rect.height = stream->src.height; 7491 dc_plane_state->src_rect.width = stream->src.width; 7492 dc_plane_state->dst_rect.height = stream->src.height; 7493 dc_plane_state->dst_rect.width = stream->src.width; 7494 dc_plane_state->clip_rect.height = stream->src.height; 7495 dc_plane_state->clip_rect.width = stream->src.width; 7496 dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256; 7497 dc_plane_state->plane_size.surface_size.height = stream->src.height; 7498 dc_plane_state->plane_size.surface_size.width = stream->src.width; 7499 dc_plane_state->plane_size.chroma_size.height = stream->src.height; 7500 dc_plane_state->plane_size.chroma_size.width = stream->src.width; 7501 dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 7502 dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN; 7503 dc_plane_state->rotation = ROTATION_ANGLE_0; 7504 dc_plane_state->is_tiling_rotated = false; 7505 dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL; 7506 7507 dc_result = dc_validate_stream(dc, stream); 7508 if (dc_result == DC_OK) 7509 dc_result = dc_validate_plane(dc, dc_plane_state); 7510 7511 if (dc_result == DC_OK) 7512 dc_result = dc_state_add_stream(dc, dc_state, stream); 7513 7514 if (dc_result == DC_OK && !dc_state_add_plane( 7515 dc, 7516 stream, 7517 dc_plane_state, 7518 dc_state)) 7519 dc_result = DC_FAIL_ATTACH_SURFACES; 7520 7521 if (dc_result == DC_OK) 7522 dc_result = dc_validate_global_state(dc, dc_state, true); 7523 7524 cleanup: 7525 if (dc_state) 7526 dc_state_release(dc_state); 7527 7528 if (dc_plane_state) 7529 dc_plane_state_release(dc_plane_state); 7530 7531 return dc_result; 7532 } 7533 7534 struct dc_stream_state * 7535 create_validate_stream_for_sink(struct drm_connector *connector, 7536 const struct drm_display_mode *drm_mode, 7537 const struct dm_connector_state *dm_state, 7538 const struct dc_stream_state *old_stream) 7539 { 7540 struct amdgpu_dm_connector *aconnector = NULL; 7541 struct amdgpu_device *adev = drm_to_adev(connector->dev); 7542 struct dc_stream_state *stream; 7543 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; 7544 int requested_bpc = drm_state ? 
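/* fall back to 8 bpc when there is no connector state */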
drm_state->max_requested_bpc : 8; 7545 enum dc_status dc_result = DC_OK; 7546 uint8_t bpc_limit = 6; 7547 7548 if (!dm_state) 7549 return NULL; 7550 7551 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 7552 aconnector = to_amdgpu_dm_connector(connector); 7553 7554 if (aconnector && 7555 (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A || 7556 aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)) 7557 bpc_limit = 8; 7558 7559 do { 7560 stream = create_stream_for_sink(connector, drm_mode, 7561 dm_state, old_stream, 7562 requested_bpc); 7563 if (stream == NULL) { 7564 drm_err(adev_to_drm(adev), "Failed to create stream for sink!\n"); 7565 break; 7566 } 7567 7568 dc_result = dc_validate_stream(adev->dm.dc, stream); 7569 7570 if (!aconnector) /* writeback connector */ 7571 return stream; 7572 7573 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 7574 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream); 7575 7576 if (dc_result == DC_OK) 7577 dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); 7578 7579 if (dc_result != DC_OK) { 7580 DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n", 7581 drm_mode->hdisplay, 7582 drm_mode->vdisplay, 7583 drm_mode->clock, 7584 dc_pixel_encoding_to_str(stream->timing.pixel_encoding), 7585 dc_color_depth_to_str(stream->timing.display_color_depth), 7586 dc_status_to_str(dc_result)); 7587 7588 dc_stream_release(stream); 7589 stream = NULL; 7590 requested_bpc -= 2; /* lower bpc to retry validation */ 7591 } 7592 7593 } while (stream == NULL && requested_bpc >= bpc_limit); 7594 7595 if ((dc_result == DC_FAIL_ENC_VALIDATE || 7596 dc_result == DC_EXCEED_DONGLE_CAP) && 7597 !aconnector->force_yuv420_output) { 7598 DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n", 7599 __func__, __LINE__); 7600 7601 aconnector->force_yuv420_output = true; 7602 stream = create_validate_stream_for_sink(connector, drm_mode, 7603 dm_state, old_stream); 7604 aconnector->force_yuv420_output = false; 7605 } 7606 7607 return stream; 7608 } 7609 7610 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, 7611 const struct drm_display_mode *mode) 7612 { 7613 int result = MODE_ERROR; 7614 struct dc_sink *dc_sink; 7615 struct drm_display_mode *test_mode; 7616 /* TODO: Unhardcode stream count */ 7617 struct dc_stream_state *stream; 7618 /* We always have an amdgpu_dm_connector here since we got 7619 * here via the amdgpu_dm_connector_helper_funcs. 7620 */ 7621 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7622 7623 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 7624 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) 7625 return result; 7626 7627 /* 7628 * Only run this the first time mode_valid is called, to initialize 7629 * EDID management. 7630 */ 7631 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && 7632 !aconnector->dc_em_sink) 7633 handle_edid_mgmt(aconnector); 7634 7635 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink; 7636 7637 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL && 7638 aconnector->base.force != DRM_FORCE_ON) { 7639 drm_err(connector->dev, "dc_sink is NULL!\n"); 7640 goto fail; 7641 } 7642 7643 test_mode = drm_mode_duplicate(connector->dev, mode); 7644 if (!test_mode) 7645 goto fail; 7646 7647 drm_mode_set_crtcinfo(test_mode, 0); 7648 7649 stream = create_validate_stream_for_sink(connector, test_mode, 7650 to_dm_connector_state(connector->state), 7651
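/* old_stream */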
NULL); 7652 drm_mode_destroy(connector->dev, test_mode); 7653 if (stream) { 7654 dc_stream_release(stream); 7655 result = MODE_OK; 7656 } 7657 7658 fail: 7659 /* TODO: error handling*/ 7660 return result; 7661 } 7662 7663 static int fill_hdr_info_packet(const struct drm_connector_state *state, 7664 struct dc_info_packet *out) 7665 { 7666 struct hdmi_drm_infoframe frame; 7667 unsigned char buf[30]; /* 26 + 4 */ 7668 ssize_t len; 7669 int ret, i; 7670 7671 memset(out, 0, sizeof(*out)); 7672 7673 if (!state->hdr_output_metadata) 7674 return 0; 7675 7676 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state); 7677 if (ret) 7678 return ret; 7679 7680 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf)); 7681 if (len < 0) 7682 return (int)len; 7683 7684 /* Static metadata is a fixed 26 bytes + 4 byte header. */ 7685 if (len != 30) 7686 return -EINVAL; 7687 7688 /* Prepare the infopacket for DC. */ 7689 switch (state->connector->connector_type) { 7690 case DRM_MODE_CONNECTOR_HDMIA: 7691 out->hb0 = 0x87; /* type */ 7692 out->hb1 = 0x01; /* version */ 7693 out->hb2 = 0x1A; /* length */ 7694 out->sb[0] = buf[3]; /* checksum */ 7695 i = 1; 7696 break; 7697 7698 case DRM_MODE_CONNECTOR_DisplayPort: 7699 case DRM_MODE_CONNECTOR_eDP: 7700 out->hb0 = 0x00; /* sdp id, zero */ 7701 out->hb1 = 0x87; /* type */ 7702 out->hb2 = 0x1D; /* payload len - 1 */ 7703 out->hb3 = (0x13 << 2); /* sdp version */ 7704 out->sb[0] = 0x01; /* version */ 7705 out->sb[1] = 0x1A; /* length */ 7706 i = 2; 7707 break; 7708 7709 default: 7710 return -EINVAL; 7711 } 7712 7713 memcpy(&out->sb[i], &buf[4], 26); 7714 out->valid = true; 7715 7716 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, 7717 sizeof(out->sb), false); 7718 7719 return 0; 7720 } 7721 7722 static int 7723 amdgpu_dm_connector_atomic_check(struct drm_connector *conn, 7724 struct drm_atomic_state *state) 7725 { 7726 struct drm_connector_state *new_con_state = 7727 drm_atomic_get_new_connector_state(state, conn); 7728 struct drm_connector_state *old_con_state = 7729 drm_atomic_get_old_connector_state(state, conn); 7730 struct drm_crtc *crtc = new_con_state->crtc; 7731 struct drm_crtc_state *new_crtc_state; 7732 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); 7733 int ret; 7734 7735 trace_amdgpu_dm_connector_atomic_check(new_con_state); 7736 7737 if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 7738 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr); 7739 if (ret < 0) 7740 return ret; 7741 } 7742 7743 if (!crtc) 7744 return 0; 7745 7746 if (new_con_state->colorspace != old_con_state->colorspace) { 7747 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7748 if (IS_ERR(new_crtc_state)) 7749 return PTR_ERR(new_crtc_state); 7750 7751 new_crtc_state->mode_changed = true; 7752 } 7753 7754 if (new_con_state->content_type != old_con_state->content_type) { 7755 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7756 if (IS_ERR(new_crtc_state)) 7757 return PTR_ERR(new_crtc_state); 7758 7759 new_crtc_state->mode_changed = true; 7760 } 7761 7762 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { 7763 struct dc_info_packet hdr_infopacket; 7764 7765 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); 7766 if (ret) 7767 return ret; 7768 7769 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7770 if (IS_ERR(new_crtc_state)) 7771 return PTR_ERR(new_crtc_state); 7772 7773 /* 7774 * DC considers the stream backends changed if the 7775 * static 
metadata changes. Forcing the modeset also 7776 * gives a simple way for userspace to switch from 7777 * 8bpc to 10bpc when setting the metadata to enter 7778 * or exit HDR. 7779 * 7780 * Changing the static metadata after it's been 7781 * set is permissible, however. So only force a 7782 * modeset if we're entering or exiting HDR. 7783 */ 7784 new_crtc_state->mode_changed = new_crtc_state->mode_changed || 7785 !old_con_state->hdr_output_metadata || 7786 !new_con_state->hdr_output_metadata; 7787 } 7788 7789 return 0; 7790 } 7791 7792 static const struct drm_connector_helper_funcs 7793 amdgpu_dm_connector_helper_funcs = { 7794 /* 7795 * If a second, larger display is hotplugged in fbcon mode, its higher- 7796 * resolution modes are filtered out by drm_mode_validate_size() and would 7797 * be missing once the user starts a display manager such as lightdm. So 7798 * rebuild the mode list in the get_modes callback, not just return the mode count. 7799 */ 7800 .get_modes = get_modes, 7801 .mode_valid = amdgpu_dm_connector_mode_valid, 7802 .atomic_check = amdgpu_dm_connector_atomic_check, 7803 }; 7804 7805 static void dm_encoder_helper_disable(struct drm_encoder *encoder) 7806 { 7807 7808 } 7809 7810 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth) 7811 { 7812 switch (display_color_depth) { 7813 case COLOR_DEPTH_666: 7814 return 6; 7815 case COLOR_DEPTH_888: 7816 return 8; 7817 case COLOR_DEPTH_101010: 7818 return 10; 7819 case COLOR_DEPTH_121212: 7820 return 12; 7821 case COLOR_DEPTH_141414: 7822 return 14; 7823 case COLOR_DEPTH_161616: 7824 return 16; 7825 default: 7826 break; 7827 } 7828 return 0; 7829 } 7830 7831 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, 7832 struct drm_crtc_state *crtc_state, 7833 struct drm_connector_state *conn_state) 7834 { 7835 struct drm_atomic_state *state = crtc_state->state; 7836 struct drm_connector *connector = conn_state->connector; 7837 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7838 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state); 7839 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; 7840 struct drm_dp_mst_topology_mgr *mst_mgr; 7841 struct drm_dp_mst_port *mst_port; 7842 struct drm_dp_mst_topology_state *mst_state; 7843 enum dc_color_depth color_depth; 7844 int clock, bpp = 0; 7845 bool is_y420 = false; 7846 7847 if (!aconnector->mst_output_port) 7848 return 0; 7849 7850 mst_port = aconnector->mst_output_port; 7851 mst_mgr = &aconnector->mst_root->mst_mgr; 7852 7853 if (!crtc_state->connectors_changed && !crtc_state->mode_changed) 7854 return 0; 7855 7856 mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr); 7857 if (IS_ERR(mst_state)) 7858 return PTR_ERR(mst_state); 7859 7860 mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link)); 7861 7862 if (!state->duplicated) { 7863 int max_bpc = conn_state->max_requested_bpc; 7864 7865 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && 7866 aconnector->force_yuv420_output; 7867 color_depth = convert_color_depth_from_display_info(connector, 7868 is_y420, 7869 max_bpc); 7870 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; 7871 clock = adjusted_mode->clock; 7872 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4); 7873 } 7874 7875 dm_new_connector_state->vcpi_slots = 7876 drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port, 7877 dm_new_connector_state->pbn); 7878 if
(dm_new_connector_state->vcpi_slots < 0) { 7879 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); 7880 return dm_new_connector_state->vcpi_slots; 7881 } 7882 return 0; 7883 } 7884 7885 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { 7886 .disable = dm_encoder_helper_disable, 7887 .atomic_check = dm_encoder_helper_atomic_check 7888 }; 7889 7890 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, 7891 struct dc_state *dc_state, 7892 struct dsc_mst_fairness_vars *vars) 7893 { 7894 struct dc_stream_state *stream = NULL; 7895 struct drm_connector *connector; 7896 struct drm_connector_state *new_con_state; 7897 struct amdgpu_dm_connector *aconnector; 7898 struct dm_connector_state *dm_conn_state; 7899 int i, j, ret; 7900 int vcpi, pbn_div, pbn = 0, slot_num = 0; 7901 7902 for_each_new_connector_in_state(state, connector, new_con_state, i) { 7903 7904 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 7905 continue; 7906 7907 aconnector = to_amdgpu_dm_connector(connector); 7908 7909 if (!aconnector->mst_output_port) 7910 continue; 7911 7912 if (!new_con_state || !new_con_state->crtc) 7913 continue; 7914 7915 dm_conn_state = to_dm_connector_state(new_con_state); 7916 7917 for (j = 0; j < dc_state->stream_count; j++) { 7918 stream = dc_state->streams[j]; 7919 if (!stream) 7920 continue; 7921 7922 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector) 7923 break; 7924 7925 stream = NULL; 7926 } 7927 7928 if (!stream) 7929 continue; 7930 7931 pbn_div = dm_mst_get_pbn_divider(stream->link); 7932 /* pbn is calculated by compute_mst_dsc_configs_for_state*/ 7933 for (j = 0; j < dc_state->stream_count; j++) { 7934 if (vars[j].aconnector == aconnector) { 7935 pbn = vars[j].pbn; 7936 break; 7937 } 7938 } 7939 7940 if (j == dc_state->stream_count || pbn_div == 0) 7941 continue; 7942 7943 slot_num = DIV_ROUND_UP(pbn, pbn_div); 7944 7945 if (stream->timing.flags.DSC != 1) { 7946 dm_conn_state->pbn = pbn; 7947 dm_conn_state->vcpi_slots = slot_num; 7948 7949 ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, 7950 dm_conn_state->pbn, false); 7951 if (ret < 0) 7952 return ret; 7953 7954 continue; 7955 } 7956 7957 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true); 7958 if (vcpi < 0) 7959 return vcpi; 7960 7961 dm_conn_state->pbn = pbn; 7962 dm_conn_state->vcpi_slots = vcpi; 7963 } 7964 return 0; 7965 } 7966 7967 static int to_drm_connector_type(enum signal_type st) 7968 { 7969 switch (st) { 7970 case SIGNAL_TYPE_HDMI_TYPE_A: 7971 return DRM_MODE_CONNECTOR_HDMIA; 7972 case SIGNAL_TYPE_EDP: 7973 return DRM_MODE_CONNECTOR_eDP; 7974 case SIGNAL_TYPE_LVDS: 7975 return DRM_MODE_CONNECTOR_LVDS; 7976 case SIGNAL_TYPE_RGB: 7977 return DRM_MODE_CONNECTOR_VGA; 7978 case SIGNAL_TYPE_DISPLAY_PORT: 7979 case SIGNAL_TYPE_DISPLAY_PORT_MST: 7980 return DRM_MODE_CONNECTOR_DisplayPort; 7981 case SIGNAL_TYPE_DVI_DUAL_LINK: 7982 case SIGNAL_TYPE_DVI_SINGLE_LINK: 7983 return DRM_MODE_CONNECTOR_DVID; 7984 case SIGNAL_TYPE_VIRTUAL: 7985 return DRM_MODE_CONNECTOR_VIRTUAL; 7986 7987 default: 7988 return DRM_MODE_CONNECTOR_Unknown; 7989 } 7990 } 7991 7992 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) 7993 { 7994 struct drm_encoder *encoder; 7995 7996 /* There is only one encoder per connector */ 7997 drm_connector_for_each_possible_encoder(connector, encoder) 7998 return encoder; 7999 8000 return NULL; 8001 } 8002 8003 
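/*
 * Illustrative sketch (not part of the driver flow): callers that need
 * per-encoder data resolve the connector's single encoder first and then
 * cast it, exactly as amdgpu_dm_get_native_mode() below does. Here
 * use_native_mode() is a made-up placeholder for the caller's logic:
 *
 *	struct drm_encoder *enc = amdgpu_dm_connector_to_encoder(connector);
 *
 *	if (enc) {
 *		struct amdgpu_encoder *aenc = to_amdgpu_encoder(enc);
 *
 *		use_native_mode(&aenc->native_mode);
 *	}
 */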
static void amdgpu_dm_get_native_mode(struct drm_connector *connector) 8004 { 8005 struct drm_encoder *encoder; 8006 struct amdgpu_encoder *amdgpu_encoder; 8007 8008 encoder = amdgpu_dm_connector_to_encoder(connector); 8009 8010 if (encoder == NULL) 8011 return; 8012 8013 amdgpu_encoder = to_amdgpu_encoder(encoder); 8014 8015 amdgpu_encoder->native_mode.clock = 0; 8016 8017 if (!list_empty(&connector->probed_modes)) { 8018 struct drm_display_mode *preferred_mode = NULL; 8019 8020 list_for_each_entry(preferred_mode, 8021 &connector->probed_modes, 8022 head) { 8023 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) 8024 amdgpu_encoder->native_mode = *preferred_mode; 8025 8026 break; 8027 } 8028 8029 } 8030 } 8031 8032 static struct drm_display_mode * 8033 amdgpu_dm_create_common_mode(struct drm_encoder *encoder, 8034 char *name, 8035 int hdisplay, int vdisplay) 8036 { 8037 struct drm_device *dev = encoder->dev; 8038 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 8039 struct drm_display_mode *mode = NULL; 8040 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 8041 8042 mode = drm_mode_duplicate(dev, native_mode); 8043 8044 if (mode == NULL) 8045 return NULL; 8046 8047 mode->hdisplay = hdisplay; 8048 mode->vdisplay = vdisplay; 8049 mode->type &= ~DRM_MODE_TYPE_PREFERRED; 8050 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); 8051 8052 return mode; 8053 8054 } 8055 8056 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, 8057 struct drm_connector *connector) 8058 { 8059 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 8060 struct drm_display_mode *mode = NULL; 8061 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 8062 struct amdgpu_dm_connector *amdgpu_dm_connector = 8063 to_amdgpu_dm_connector(connector); 8064 int i; 8065 int n; 8066 struct mode_size { 8067 char name[DRM_DISPLAY_MODE_LEN]; 8068 int w; 8069 int h; 8070 } common_modes[] = { 8071 { "640x480", 640, 480}, 8072 { "800x600", 800, 600}, 8073 { "1024x768", 1024, 768}, 8074 { "1280x720", 1280, 720}, 8075 { "1280x800", 1280, 800}, 8076 {"1280x1024", 1280, 1024}, 8077 { "1440x900", 1440, 900}, 8078 {"1680x1050", 1680, 1050}, 8079 {"1600x1200", 1600, 1200}, 8080 {"1920x1080", 1920, 1080}, 8081 {"1920x1200", 1920, 1200} 8082 }; 8083 8084 n = ARRAY_SIZE(common_modes); 8085 8086 for (i = 0; i < n; i++) { 8087 struct drm_display_mode *curmode = NULL; 8088 bool mode_existed = false; 8089 8090 if (common_modes[i].w > native_mode->hdisplay || 8091 common_modes[i].h > native_mode->vdisplay || 8092 (common_modes[i].w == native_mode->hdisplay && 8093 common_modes[i].h == native_mode->vdisplay)) 8094 continue; 8095 8096 list_for_each_entry(curmode, &connector->probed_modes, head) { 8097 if (common_modes[i].w == curmode->hdisplay && 8098 common_modes[i].h == curmode->vdisplay) { 8099 mode_existed = true; 8100 break; 8101 } 8102 } 8103 8104 if (mode_existed) 8105 continue; 8106 8107 mode = amdgpu_dm_create_common_mode(encoder, 8108 common_modes[i].name, common_modes[i].w, 8109 common_modes[i].h); 8110 if (!mode) 8111 continue; 8112 8113 drm_mode_probed_add(connector, mode); 8114 amdgpu_dm_connector->num_modes++; 8115 } 8116 } 8117 8118 static void amdgpu_set_panel_orientation(struct drm_connector *connector) 8119 { 8120 struct drm_encoder *encoder; 8121 struct amdgpu_encoder *amdgpu_encoder; 8122 const struct drm_display_mode *native_mode; 8123 8124 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && 8125 connector->connector_type != 
DRM_MODE_CONNECTOR_LVDS) 8126 return; 8127 8128 mutex_lock(&connector->dev->mode_config.mutex); 8129 amdgpu_dm_connector_get_modes(connector); 8130 mutex_unlock(&connector->dev->mode_config.mutex); 8131 8132 encoder = amdgpu_dm_connector_to_encoder(connector); 8133 if (!encoder) 8134 return; 8135 8136 amdgpu_encoder = to_amdgpu_encoder(encoder); 8137 8138 native_mode = &amdgpu_encoder->native_mode; 8139 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) 8140 return; 8141 8142 drm_connector_set_panel_orientation_with_quirk(connector, 8143 DRM_MODE_PANEL_ORIENTATION_UNKNOWN, 8144 native_mode->hdisplay, 8145 native_mode->vdisplay); 8146 } 8147 8148 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, 8149 const struct drm_edid *drm_edid) 8150 { 8151 struct amdgpu_dm_connector *amdgpu_dm_connector = 8152 to_amdgpu_dm_connector(connector); 8153 8154 if (drm_edid) { 8155 /* empty probed_modes */ 8156 INIT_LIST_HEAD(&connector->probed_modes); 8157 amdgpu_dm_connector->num_modes = 8158 drm_edid_connector_add_modes(connector); 8159 8160 /* sorting the probed modes before calling function 8161 * amdgpu_dm_get_native_mode() since EDID can have 8162 * more than one preferred mode. The modes that are 8163 * later in the probed mode list could be of higher 8164 * and preferred resolution. For example, 3840x2160 8165 * resolution in base EDID preferred timing and 4096x2160 8166 * preferred resolution in DID extension block later. 8167 */ 8168 drm_mode_sort(&connector->probed_modes); 8169 amdgpu_dm_get_native_mode(connector); 8170 8171 /* Freesync capabilities are reset by calling 8172 * drm_edid_connector_add_modes() and need to be 8173 * restored here. 8174 */ 8175 amdgpu_dm_update_freesync_caps(connector, drm_edid); 8176 } else { 8177 amdgpu_dm_connector->num_modes = 0; 8178 } 8179 } 8180 8181 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, 8182 struct drm_display_mode *mode) 8183 { 8184 struct drm_display_mode *m; 8185 8186 list_for_each_entry(m, &aconnector->base.probed_modes, head) { 8187 if (drm_mode_equal(m, mode)) 8188 return true; 8189 } 8190 8191 return false; 8192 } 8193 8194 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) 8195 { 8196 const struct drm_display_mode *m; 8197 struct drm_display_mode *new_mode; 8198 uint i; 8199 u32 new_modes_count = 0; 8200 8201 /* Standard FPS values 8202 * 8203 * 23.976 - TV/NTSC 8204 * 24 - Cinema 8205 * 25 - TV/PAL 8206 * 29.97 - TV/NTSC 8207 * 30 - TV/NTSC 8208 * 48 - Cinema HFR 8209 * 50 - TV/PAL 8210 * 60 - Commonly used 8211 * 48,72,96,120 - Multiples of 24 8212 */ 8213 static const u32 common_rates[] = { 8214 23976, 24000, 25000, 29970, 30000, 8215 48000, 50000, 60000, 72000, 96000, 120000 8216 }; 8217 8218 /* 8219 * Find mode with highest refresh rate with the same resolution 8220 * as the preferred mode. Some monitors report a preferred mode 8221 * with lower resolution than the highest refresh rate supported. 
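 *
 * Worked example (illustrative numbers, not from a real panel): a
 * 1920x1080 mode with clock = 148500 kHz, htotal = 2200 and
 * vtotal = 1125 (60 Hz), retargeted to 50 Hz by the loop below, gives
 * target_vtotal = 148500 * 1000 * 1000 / (50000 * 2200) = 1350, so
 * vtotal, vsync_start and vsync_end are all stretched by 225 lines.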
8222 */ 8223 8224 m = get_highest_refresh_rate_mode(aconnector, true); 8225 if (!m) 8226 return 0; 8227 8228 for (i = 0; i < ARRAY_SIZE(common_rates); i++) { 8229 u64 target_vtotal, target_vtotal_diff; 8230 u64 num, den; 8231 8232 if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) 8233 continue; 8234 8235 if (common_rates[i] < aconnector->min_vfreq * 1000 || 8236 common_rates[i] > aconnector->max_vfreq * 1000) 8237 continue; 8238 8239 num = (unsigned long long)m->clock * 1000 * 1000; 8240 den = common_rates[i] * (unsigned long long)m->htotal; 8241 target_vtotal = div_u64(num, den); 8242 target_vtotal_diff = target_vtotal - m->vtotal; 8243 8244 /* Check for illegal modes */ 8245 if (m->vsync_start + target_vtotal_diff < m->vdisplay || 8246 m->vsync_end + target_vtotal_diff < m->vsync_start || 8247 m->vtotal + target_vtotal_diff < m->vsync_end) 8248 continue; 8249 8250 new_mode = drm_mode_duplicate(aconnector->base.dev, m); 8251 if (!new_mode) 8252 goto out; 8253 8254 new_mode->vtotal += (u16)target_vtotal_diff; 8255 new_mode->vsync_start += (u16)target_vtotal_diff; 8256 new_mode->vsync_end += (u16)target_vtotal_diff; 8257 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; 8258 new_mode->type |= DRM_MODE_TYPE_DRIVER; 8259 8260 if (!is_duplicate_mode(aconnector, new_mode)) { 8261 drm_mode_probed_add(&aconnector->base, new_mode); 8262 new_modes_count += 1; 8263 } else 8264 drm_mode_destroy(aconnector->base.dev, new_mode); 8265 } 8266 out: 8267 return new_modes_count; 8268 } 8269 8270 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, 8271 const struct drm_edid *drm_edid) 8272 { 8273 struct amdgpu_dm_connector *amdgpu_dm_connector = 8274 to_amdgpu_dm_connector(connector); 8275 8276 if (!(amdgpu_freesync_vid_mode && drm_edid)) 8277 return; 8278 8279 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 8280 amdgpu_dm_connector->num_modes += 8281 add_fs_modes(amdgpu_dm_connector); 8282 } 8283 8284 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) 8285 { 8286 struct amdgpu_dm_connector *amdgpu_dm_connector = 8287 to_amdgpu_dm_connector(connector); 8288 struct drm_encoder *encoder; 8289 const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid; 8290 struct dc_link_settings *verified_link_cap = 8291 &amdgpu_dm_connector->dc_link->verified_link_cap; 8292 const struct dc *dc = amdgpu_dm_connector->dc_link->dc; 8293 8294 encoder = amdgpu_dm_connector_to_encoder(connector); 8295 8296 if (!drm_edid) { 8297 amdgpu_dm_connector->num_modes = 8298 drm_add_modes_noedid(connector, 640, 480); 8299 if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) 8300 amdgpu_dm_connector->num_modes += 8301 drm_add_modes_noedid(connector, 1920, 1080); 8302 } else { 8303 amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); 8304 if (encoder) 8305 amdgpu_dm_connector_add_common_modes(encoder, connector); 8306 amdgpu_dm_connector_add_freesync_modes(connector, drm_edid); 8307 } 8308 amdgpu_dm_fbc_init(connector); 8309 8310 return amdgpu_dm_connector->num_modes; 8311 } 8312 8313 static const u32 supported_colorspaces = 8314 BIT(DRM_MODE_COLORIMETRY_BT709_YCC) | 8315 BIT(DRM_MODE_COLORIMETRY_OPRGB) | 8316 BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) | 8317 BIT(DRM_MODE_COLORIMETRY_BT2020_YCC); 8318 8319 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, 8320 struct amdgpu_dm_connector *aconnector, 8321 int connector_type, 8322 struct dc_link *link, 8323 int link_index) 8324 { 8325 struct amdgpu_device *adev = 
drm_to_adev(dm->ddev); 8326 8327 /* 8328 * Some of the properties below require access to state, like bpc. 8329 * Allocate some default initial connector state with our reset helper. 8330 */ 8331 if (aconnector->base.funcs->reset) 8332 aconnector->base.funcs->reset(&aconnector->base); 8333 8334 aconnector->connector_id = link_index; 8335 aconnector->bl_idx = -1; 8336 aconnector->dc_link = link; 8337 aconnector->base.interlace_allowed = false; 8338 aconnector->base.doublescan_allowed = false; 8339 aconnector->base.stereo_allowed = false; 8340 aconnector->base.dpms = DRM_MODE_DPMS_OFF; 8341 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ 8342 aconnector->audio_inst = -1; 8343 aconnector->pack_sdp_v1_3 = false; 8344 aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE; 8345 memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info)); 8346 mutex_init(&aconnector->hpd_lock); 8347 mutex_init(&aconnector->handle_mst_msg_ready); 8348 8349 /* 8350 * Configure HPD support: connector->polled defaults to 0, 8351 * which means HPD hot plug is not supported. 8352 */ 8353 switch (connector_type) { 8354 case DRM_MODE_CONNECTOR_HDMIA: 8355 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8356 aconnector->base.ycbcr_420_allowed = 8357 link->link_enc->features.hdmi_ycbcr420_supported ? true : false; 8358 break; 8359 case DRM_MODE_CONNECTOR_DisplayPort: 8360 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8361 link->link_enc = link_enc_cfg_get_link_enc(link); 8362 ASSERT(link->link_enc); 8363 if (link->link_enc) 8364 aconnector->base.ycbcr_420_allowed = 8365 link->link_enc->features.dp_ycbcr420_supported ? true : false; 8366 break; 8367 case DRM_MODE_CONNECTOR_DVID: 8368 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8369 break; 8370 default: 8371 break; 8372 } 8373 8374 drm_object_attach_property(&aconnector->base.base, 8375 dm->ddev->mode_config.scaling_mode_property, 8376 DRM_MODE_SCALE_NONE); 8377 8378 if (connector_type == DRM_MODE_CONNECTOR_HDMIA 8379 || (connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root)) 8380 drm_connector_attach_broadcast_rgb_property(&aconnector->base); 8381 8382 drm_object_attach_property(&aconnector->base.base, 8383 adev->mode_info.underscan_property, 8384 UNDERSCAN_OFF); 8385 drm_object_attach_property(&aconnector->base.base, 8386 adev->mode_info.underscan_hborder_property, 8387 0); 8388 drm_object_attach_property(&aconnector->base.base, 8389 adev->mode_info.underscan_vborder_property, 8390 0); 8391 8392 if (!aconnector->mst_root) 8393 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); 8394 8395 aconnector->base.state->max_bpc = 16; 8396 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; 8397 8398 if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { 8399 /* Content Type is currently only implemented for HDMI.
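 * It maps to the ITC bit and the CN0/CN1 content-type bits of the
 * HDMI AVI infoframe.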
*/ 8400 drm_connector_attach_content_type_property(&aconnector->base); 8401 } 8402 8403 if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { 8404 if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces)) 8405 drm_connector_attach_colorspace_property(&aconnector->base); 8406 } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) || 8407 connector_type == DRM_MODE_CONNECTOR_eDP) { 8408 if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces)) 8409 drm_connector_attach_colorspace_property(&aconnector->base); 8410 } 8411 8412 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 8413 connector_type == DRM_MODE_CONNECTOR_DisplayPort || 8414 connector_type == DRM_MODE_CONNECTOR_eDP) { 8415 drm_connector_attach_hdr_output_metadata_property(&aconnector->base); 8416 8417 if (!aconnector->mst_root) 8418 drm_connector_attach_vrr_capable_property(&aconnector->base); 8419 8420 if (adev->dm.hdcp_workqueue) 8421 drm_connector_attach_content_protection_property(&aconnector->base, true); 8422 } 8423 } 8424 8425 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 8426 struct i2c_msg *msgs, int num) 8427 { 8428 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 8429 struct ddc_service *ddc_service = i2c->ddc_service; 8430 struct i2c_command cmd; 8431 int i; 8432 int result = -EIO; 8433 8434 if (!ddc_service->ddc_pin) 8435 return result; 8436 8437 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); 8438 8439 if (!cmd.payloads) 8440 return result; 8441 8442 cmd.number_of_payloads = num; 8443 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 8444 cmd.speed = 100; 8445 8446 for (i = 0; i < num; i++) { 8447 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 8448 cmd.payloads[i].address = msgs[i].addr; 8449 cmd.payloads[i].length = msgs[i].len; 8450 cmd.payloads[i].data = msgs[i].buf; 8451 } 8452 8453 if (i2c->oem) { 8454 if (dc_submit_i2c_oem( 8455 ddc_service->ctx->dc, 8456 &cmd)) 8457 result = num; 8458 } else { 8459 if (dc_submit_i2c( 8460 ddc_service->ctx->dc, 8461 ddc_service->link->link_index, 8462 &cmd)) 8463 result = num; 8464 } 8465 8466 kfree(cmd.payloads); 8467 return result; 8468 } 8469 8470 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 8471 { 8472 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 8473 } 8474 8475 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 8476 .master_xfer = amdgpu_dm_i2c_xfer, 8477 .functionality = amdgpu_dm_i2c_func, 8478 }; 8479 8480 static struct amdgpu_i2c_adapter * 8481 create_i2c(struct ddc_service *ddc_service, bool oem) 8482 { 8483 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 8484 struct amdgpu_i2c_adapter *i2c; 8485 8486 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); 8487 if (!i2c) 8488 return NULL; 8489 i2c->base.owner = THIS_MODULE; 8490 i2c->base.dev.parent = &adev->pdev->dev; 8491 i2c->base.algo = &amdgpu_dm_i2c_algo; 8492 if (oem) 8493 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c OEM bus"); 8494 else 8495 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", 8496 ddc_service->link->link_index); 8497 i2c_set_adapdata(&i2c->base, i2c); 8498 i2c->ddc_service = ddc_service; 8499 i2c->oem = oem; 8500 8501 return i2c; 8502 } 8503 8504 int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector) 8505 { 8506 struct cec_connector_info conn_info; 8507 struct drm_device *ddev = aconnector->base.dev; 8508 struct device *hdmi_dev = ddev->dev; 8509 8510 if 
(amdgpu_dc_debug_mask & DC_DISABLE_HDMI_CEC) { 8511 drm_info(ddev, "HDMI-CEC feature masked\n"); 8512 return -EINVAL; 8513 } 8514 8515 cec_fill_conn_info_from_drm(&conn_info, &aconnector->base); 8516 aconnector->notifier = 8517 cec_notifier_conn_register(hdmi_dev, NULL, &conn_info); 8518 if (!aconnector->notifier) { 8519 drm_err(ddev, "Failed to create cec notifier\n"); 8520 return -ENOMEM; 8521 } 8522 8523 return 0; 8524 } 8525 8526 /* 8527 * Note: this function assumes that dc_link_detect() was called for the 8528 * dc_link which will be represented by this aconnector. 8529 */ 8530 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 8531 struct amdgpu_dm_connector *aconnector, 8532 u32 link_index, 8533 struct amdgpu_encoder *aencoder) 8534 { 8535 int res = 0; 8536 int connector_type; 8537 struct dc *dc = dm->dc; 8538 struct dc_link *link = dc_get_link_at_index(dc, link_index); 8539 struct amdgpu_i2c_adapter *i2c; 8540 8541 /* Not needed for writeback connector */ 8542 link->priv = aconnector; 8543 8544 8545 i2c = create_i2c(link->ddc, false); 8546 if (!i2c) { 8547 drm_err(adev_to_drm(dm->adev), "Failed to create i2c adapter data\n"); 8548 return -ENOMEM; 8549 } 8550 8551 aconnector->i2c = i2c; 8552 res = i2c_add_adapter(&i2c->base); 8553 8554 if (res) { 8555 drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index); 8556 goto out_free; 8557 } 8558 8559 connector_type = to_drm_connector_type(link->connector_signal); 8560 8561 res = drm_connector_init_with_ddc( 8562 dm->ddev, 8563 &aconnector->base, 8564 &amdgpu_dm_connector_funcs, 8565 connector_type, 8566 &i2c->base); 8567 8568 if (res) { 8569 drm_err(adev_to_drm(dm->adev), "connector_init failed\n"); 8570 aconnector->connector_id = -1; 8571 goto out_free; 8572 } 8573 8574 drm_connector_helper_add( 8575 &aconnector->base, 8576 &amdgpu_dm_connector_helper_funcs); 8577 8578 amdgpu_dm_connector_init_helper( 8579 dm, 8580 aconnector, 8581 connector_type, 8582 link, 8583 link_index); 8584 8585 drm_connector_attach_encoder( 8586 &aconnector->base, &aencoder->base); 8587 8588 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 8589 connector_type == DRM_MODE_CONNECTOR_HDMIB) 8590 amdgpu_dm_initialize_hdmi_connector(aconnector); 8591 8592 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort 8593 || connector_type == DRM_MODE_CONNECTOR_eDP) 8594 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); 8595 8596 out_free: 8597 if (res) { 8598 kfree(i2c); 8599 aconnector->i2c = NULL; 8600 } 8601 return res; 8602 } 8603 8604 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 8605 { 8606 switch (adev->mode_info.num_crtc) { 8607 case 1: 8608 return 0x1; 8609 case 2: 8610 return 0x3; 8611 case 3: 8612 return 0x7; 8613 case 4: 8614 return 0xf; 8615 case 5: 8616 return 0x1f; 8617 case 6: 8618 default: 8619 return 0x3f; 8620 } 8621 } 8622 8623 static int amdgpu_dm_encoder_init(struct drm_device *dev, 8624 struct amdgpu_encoder *aencoder, 8625 uint32_t link_index) 8626 { 8627 struct amdgpu_device *adev = drm_to_adev(dev); 8628 8629 int res = drm_encoder_init(dev, 8630 &aencoder->base, 8631 &amdgpu_dm_encoder_funcs, 8632 DRM_MODE_ENCODER_TMDS, 8633 NULL); 8634 8635 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 8636 8637 if (!res) 8638 aencoder->encoder_id = link_index; 8639 else 8640 aencoder->encoder_id = -1; 8641 8642 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 8643 8644 return res; 8645 } 8646 8647 static void 
manage_dm_interrupts(struct amdgpu_device *adev, 8648 struct amdgpu_crtc *acrtc, 8649 struct dm_crtc_state *acrtc_state) 8650 { 8651 struct drm_vblank_crtc_config config = {0}; 8652 struct dc_crtc_timing *timing; 8653 int offdelay; 8654 8655 if (acrtc_state) { 8656 timing = &acrtc_state->stream->timing; 8657 8658 /* 8659 * Depending on when the HW latching event of double-buffered 8660 * registers happens relative to the PSR SDP deadline, and how 8661 * far the panel clock has drifted since the last ALPM off 8662 * event, there can be up to 3 frames of delay between sending 8663 * the PSR exit cmd to DMUB fw and when the panel starts 8664 * displaying live frames. 8665 * 8666 * We can set: 8667 * 8668 * 20/100 * offdelay_ms = 3_frames_ms 8669 * => offdelay_ms = 5 * 3_frames_ms 8670 * 8671 * This ensures that `3_frames_ms` will only be experienced as a 8672 * 20% delay on top of how long the display has been static, and 8673 * thus makes the delay less perceivable. 8674 */ 8675 if (acrtc_state->stream->link->psr_settings.psr_version < 8676 DC_PSR_VERSION_UNSUPPORTED) { 8677 offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 * 8678 timing->v_total * 8679 timing->h_total, 8680 timing->pix_clk_100hz); 8681 config.offdelay_ms = offdelay ?: 30; 8682 } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) < 8683 IP_VERSION(3, 5, 0) || 8684 !(adev->flags & AMD_IS_APU)) { 8685 /* 8686 * Older HW and DGPU have issues with instant off; 8687 * use a 2 frame offdelay. 8688 */ 8689 offdelay = DIV64_U64_ROUND_UP((u64)20 * 8690 timing->v_total * 8691 timing->h_total, 8692 timing->pix_clk_100hz); 8693 8694 config.offdelay_ms = offdelay ?: 30; 8695 } else { 8696 /* offdelay_ms = 0 will never disable vblank */ 8697 config.offdelay_ms = 1; 8698 config.disable_immediate = true; 8699 } 8700 8701 drm_crtc_vblank_on_config(&acrtc->base, 8702 &config); 8703 } else { 8704 drm_crtc_vblank_off(&acrtc->base); 8705 } 8706 } 8707 8708 static void dm_update_pflip_irq_state(struct amdgpu_device *adev, 8709 struct amdgpu_crtc *acrtc) 8710 { 8711 int irq_type = 8712 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); 8713 8714 /* 8715 * This reads the current state for the IRQ and forcibly reapplies 8716 * the setting to hardware.
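 * (amdgpu_irq_update() recomputes whether the source should be enabled
 * from the cached usage counts and programs the hardware accordingly,
 * so software and hardware state end up back in sync.)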
8717 */ 8718 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type); 8719 } 8720 8721 static bool 8722 is_scaling_state_different(const struct dm_connector_state *dm_state, 8723 const struct dm_connector_state *old_dm_state) 8724 { 8725 if (dm_state->scaling != old_dm_state->scaling) 8726 return true; 8727 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { 8728 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) 8729 return true; 8730 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { 8731 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) 8732 return true; 8733 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || 8734 dm_state->underscan_vborder != old_dm_state->underscan_vborder) 8735 return true; 8736 return false; 8737 } 8738 8739 static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, 8740 struct drm_crtc_state *old_crtc_state, 8741 struct drm_connector_state *new_conn_state, 8742 struct drm_connector_state *old_conn_state, 8743 const struct drm_connector *connector, 8744 struct hdcp_workqueue *hdcp_w) 8745 { 8746 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8747 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 8748 8749 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 8750 connector->index, connector->status, connector->dpms); 8751 pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 8752 old_conn_state->content_protection, new_conn_state->content_protection); 8753 8754 if (old_crtc_state) 8755 pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 8756 old_crtc_state->enable, 8757 old_crtc_state->active, 8758 old_crtc_state->mode_changed, 8759 old_crtc_state->active_changed, 8760 old_crtc_state->connectors_changed); 8761 8762 if (new_crtc_state) 8763 pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 8764 new_crtc_state->enable, 8765 new_crtc_state->active, 8766 new_crtc_state->mode_changed, 8767 new_crtc_state->active_changed, 8768 new_crtc_state->connectors_changed); 8769 8770 /* hdcp content type change */ 8771 if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && 8772 new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 8773 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8774 pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); 8775 return true; 8776 } 8777 8778 /* CP is being re enabled, ignore this */ 8779 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && 8780 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8781 if (new_crtc_state && new_crtc_state->mode_changed) { 8782 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8783 pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); 8784 return true; 8785 } 8786 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; 8787 pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); 8788 return false; 8789 } 8790 8791 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED 8792 * 8793 * Handles: UNDESIRED -> ENABLED 8794 */ 8795 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 8796 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 8797 
new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8798 8799 /* Stream removed and re-enabled 8800 * 8801 * Can sometimes overlap with the HPD case, 8802 * thus set update_hdcp to false to avoid 8803 * setting HDCP multiple times. 8804 * 8805 * Handles: DESIRED -> DESIRED (Special case) 8806 */ 8807 if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && 8808 new_conn_state->crtc && new_conn_state->crtc->enabled && 8809 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8810 dm_con_state->update_hdcp = false; 8811 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", 8812 __func__); 8813 return true; 8814 } 8815 8816 /* Hot-plug, headless s3, dpms 8817 * 8818 * Only start HDCP if the display is connected/enabled. 8819 * update_hdcp flag will be set to false until the next 8820 * HPD comes in. 8821 * 8822 * Handles: DESIRED -> DESIRED (Special case) 8823 */ 8824 if (dm_con_state->update_hdcp && 8825 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 8826 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 8827 dm_con_state->update_hdcp = false; 8828 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", 8829 __func__); 8830 return true; 8831 } 8832 8833 if (old_conn_state->content_protection == new_conn_state->content_protection) { 8834 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8835 if (new_crtc_state && new_crtc_state->mode_changed) { 8836 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", 8837 __func__); 8838 return true; 8839 } 8840 pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", 8841 __func__); 8842 return false; 8843 } 8844 8845 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); 8846 return false; 8847 } 8848 8849 if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { 8850 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", 8851 __func__); 8852 return true; 8853 } 8854 8855 pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); 8856 return false; 8857 } 8858 8859 static void remove_stream(struct amdgpu_device *adev, 8860 struct amdgpu_crtc *acrtc, 8861 struct dc_stream_state *stream) 8862 { 8863 /* this is the update mode case */ 8864 8865 acrtc->otg_inst = -1; 8866 acrtc->enabled = false; 8867 } 8868 8869 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 8870 { 8871 8872 assert_spin_locked(&acrtc->base.dev->event_lock); 8873 WARN_ON(acrtc->event); 8874 8875 acrtc->event = acrtc->base.state->event; 8876 8877 /* Set the flip status */ 8878 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 8879 8880 /* Mark this event as consumed */ 8881 acrtc->base.state->event = NULL; 8882 8883 drm_dbg_state(acrtc->base.dev, 8884 "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 8885 acrtc->crtc_id); 8886 } 8887 8888 static void update_freesync_state_on_stream( 8889 struct amdgpu_display_manager *dm, 8890 struct dm_crtc_state *new_crtc_state, 8891 struct dc_stream_state *new_stream, 8892 struct dc_plane_state *surface, 8893 u32 flip_timestamp_in_us) 8894 { 8895 struct mod_vrr_params vrr_params; 8896 struct dc_info_packet vrr_infopacket = {0}; 8897 struct amdgpu_device *adev = dm->adev; 8898 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 8899 unsigned long flags; 8900 bool pack_sdp_v1_3 = false; 8901 struct amdgpu_dm_connector *aconn; 8902 enum 
vrr_packet_type packet_type = PACKET_TYPE_VRR; 8903 8904 if (!new_stream) 8905 return; 8906 8907 /* 8908 * TODO: Determine why min/max totals and vrefresh can be 0 here. 8909 * For now it's sufficient to just guard against these conditions. 8910 */ 8911 8912 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 8913 return; 8914 8915 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8916 vrr_params = acrtc->dm_irq_params.vrr_params; 8917 8918 if (surface) { 8919 mod_freesync_handle_preflip( 8920 dm->freesync_module, 8921 surface, 8922 new_stream, 8923 flip_timestamp_in_us, 8924 &vrr_params); 8925 8926 if (adev->family < AMDGPU_FAMILY_AI && 8927 amdgpu_dm_crtc_vrr_active(new_crtc_state)) { 8928 mod_freesync_handle_v_update(dm->freesync_module, 8929 new_stream, &vrr_params); 8930 8931 /* Need to call this before the frame ends. */ 8932 dc_stream_adjust_vmin_vmax(dm->dc, 8933 new_crtc_state->stream, 8934 &vrr_params.adjust); 8935 } 8936 } 8937 8938 aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; 8939 8940 if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) { 8941 pack_sdp_v1_3 = aconn->pack_sdp_v1_3; 8942 8943 if (aconn->vsdb_info.amd_vsdb_version == 1) 8944 packet_type = PACKET_TYPE_FS_V1; 8945 else if (aconn->vsdb_info.amd_vsdb_version == 2) 8946 packet_type = PACKET_TYPE_FS_V2; 8947 else if (aconn->vsdb_info.amd_vsdb_version == 3) 8948 packet_type = PACKET_TYPE_FS_V3; 8949 8950 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, 8951 &new_stream->adaptive_sync_infopacket); 8952 } 8953 8954 mod_freesync_build_vrr_infopacket( 8955 dm->freesync_module, 8956 new_stream, 8957 &vrr_params, 8958 packet_type, 8959 TRANSFER_FUNC_UNKNOWN, 8960 &vrr_infopacket, 8961 pack_sdp_v1_3); 8962 8963 new_crtc_state->freesync_vrr_info_changed |= 8964 (memcmp(&new_crtc_state->vrr_infopacket, 8965 &vrr_infopacket, 8966 sizeof(vrr_infopacket)) != 0); 8967 8968 acrtc->dm_irq_params.vrr_params = vrr_params; 8969 new_crtc_state->vrr_infopacket = vrr_infopacket; 8970 8971 new_stream->vrr_infopacket = vrr_infopacket; 8972 new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params); 8973 8974 if (new_crtc_state->freesync_vrr_info_changed) 8975 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", 8976 new_crtc_state->base.crtc->base.id, 8977 (int)new_crtc_state->base.vrr_enabled, 8978 (int)vrr_params.state); 8979 8980 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8981 } 8982 8983 static void update_stream_irq_parameters( 8984 struct amdgpu_display_manager *dm, 8985 struct dm_crtc_state *new_crtc_state) 8986 { 8987 struct dc_stream_state *new_stream = new_crtc_state->stream; 8988 struct mod_vrr_params vrr_params; 8989 struct mod_freesync_config config = new_crtc_state->freesync_config; 8990 struct amdgpu_device *adev = dm->adev; 8991 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 8992 unsigned long flags; 8993 8994 if (!new_stream) 8995 return; 8996 8997 /* 8998 * TODO: Determine why min/max totals and vrefresh can be 0 here. 8999 * For now it's sufficient to just guard against these conditions. 
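	 * A zero h_total or v_total would feed degenerate timings into the
	 * freesync/VRR calculations below, hence the early return.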
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		/*
		 * If a freesync-compatible mode was set, config.state will
		 * already have been set in atomic check.
		 */
		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
		} else {
			config.state = new_crtc_state->base.vrr_enabled ?
					VRR_STATE_ACTIVE_VARIABLE :
					VRR_STATE_INACTIVE;
		}
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable the vblank irq, as a
		 * re-enable after a disable can compute bogus vblank/pflip
		 * timestamps if it happens to land inside the display
		 * front porch.
		 *
		 * We also need the vupdate irq for the actual core vblank
		 * handling at the end of vblank.
		 */
		WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
		WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
		drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n",
			       __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
		drm_crtc_vblank_put(new_state->base.crtc);
		drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n",
			       __func__, new_state->base.crtc->base.id);
	}
}

static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
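	 * For now every cursor plane in the old state gets a cursor update,
	 * regardless of which stream the commit actually touched.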
9081 */ 9082 for_each_old_plane_in_state(state, plane, old_plane_state, i) 9083 if (plane->type == DRM_PLANE_TYPE_CURSOR) 9084 amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state); 9085 } 9086 9087 static inline uint32_t get_mem_type(struct drm_framebuffer *fb) 9088 { 9089 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); 9090 9091 return abo->tbo.resource ? abo->tbo.resource->mem_type : 0; 9092 } 9093 9094 static void amdgpu_dm_update_cursor(struct drm_plane *plane, 9095 struct drm_plane_state *old_plane_state, 9096 struct dc_stream_update *update) 9097 { 9098 struct amdgpu_device *adev = drm_to_adev(plane->dev); 9099 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 9100 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 9101 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 9102 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 9103 uint64_t address = afb ? afb->address : 0; 9104 struct dc_cursor_position position = {0}; 9105 struct dc_cursor_attributes attributes; 9106 int ret; 9107 9108 if (!plane->state->fb && !old_plane_state->fb) 9109 return; 9110 9111 drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n", 9112 amdgpu_crtc->crtc_id, plane->state->crtc_w, 9113 plane->state->crtc_h); 9114 9115 ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); 9116 if (ret) 9117 return; 9118 9119 if (!position.enable) { 9120 /* turn off cursor */ 9121 if (crtc_state && crtc_state->stream) { 9122 dc_stream_set_cursor_position(crtc_state->stream, 9123 &position); 9124 update->cursor_position = &crtc_state->stream->cursor_position; 9125 } 9126 return; 9127 } 9128 9129 amdgpu_crtc->cursor_width = plane->state->crtc_w; 9130 amdgpu_crtc->cursor_height = plane->state->crtc_h; 9131 9132 memset(&attributes, 0, sizeof(attributes)); 9133 attributes.address.high_part = upper_32_bits(address); 9134 attributes.address.low_part = lower_32_bits(address); 9135 attributes.width = plane->state->crtc_w; 9136 attributes.height = plane->state->crtc_h; 9137 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 9138 attributes.rotation_angle = 0; 9139 attributes.attribute_flags.value = 0; 9140 9141 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM 9142 * legacy gamma setup. 
9143 */ 9144 if (crtc_state->cm_is_degamma_srgb && 9145 adev->dm.dc->caps.color.dpp.gamma_corr) 9146 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; 9147 9148 if (afb) 9149 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; 9150 9151 if (crtc_state->stream) { 9152 if (!dc_stream_set_cursor_attributes(crtc_state->stream, 9153 &attributes)) 9154 drm_err(adev_to_drm(adev), "DC failed to set cursor attributes\n"); 9155 9156 update->cursor_attributes = &crtc_state->stream->cursor_attributes; 9157 9158 if (!dc_stream_set_cursor_position(crtc_state->stream, 9159 &position)) 9160 drm_err(adev_to_drm(adev), "DC failed to set cursor position\n"); 9161 9162 update->cursor_position = &crtc_state->stream->cursor_position; 9163 } 9164 } 9165 9166 static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach, 9167 const struct dm_crtc_state *acrtc_state, 9168 const u64 current_ts) 9169 { 9170 struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; 9171 struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; 9172 struct amdgpu_dm_connector *aconn = 9173 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; 9174 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); 9175 9176 if (acrtc_state->update_type > UPDATE_TYPE_FAST) { 9177 if (pr->config.replay_supported && !pr->replay_feature_enabled) 9178 amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn); 9179 else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED && 9180 !psr->psr_feature_enabled) 9181 if (!aconn->disallow_edp_enter_psr) 9182 amdgpu_dm_link_setup_psr(acrtc_state->stream); 9183 } 9184 9185 /* Decrement skip count when SR is enabled and we're doing fast updates. */ 9186 if (acrtc_state->update_type == UPDATE_TYPE_FAST && 9187 (psr->psr_feature_enabled || pr->config.replay_supported)) { 9188 if (aconn->sr_skip_count > 0) 9189 aconn->sr_skip_count--; 9190 9191 /* Allow SR when skip count is 0. */ 9192 acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count; 9193 9194 /* 9195 * If sink supports PSR SU/Panel Replay, there is no need to rely on 9196 * a vblank event disable request to enable PSR/RP. PSR SU/RP 9197 * can be enabled immediately once OS demonstrates an 9198 * adequate number of fast atomic commits to notify KMD 9199 * of update events. See `vblank_control_worker()`. 
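	 *
	 * The timestamp check below additionally keeps SR entry off until
	 * dirty-rect changes have been quiet for 500 ms (the timestamp is
	 * set in amdgpu_dm_commit_planes()).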
9200 */ 9201 if (!vrr_active && 9202 acrtc_attach->dm_irq_params.allow_sr_entry && 9203 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 9204 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && 9205 #endif 9206 (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) { 9207 if (pr->replay_feature_enabled && !pr->replay_allow_active) 9208 amdgpu_dm_replay_enable(acrtc_state->stream, true); 9209 if (psr->psr_version == DC_PSR_VERSION_SU_1 && 9210 !psr->psr_allow_active && !aconn->disallow_edp_enter_psr) 9211 amdgpu_dm_psr_enable(acrtc_state->stream); 9212 } 9213 } else { 9214 acrtc_attach->dm_irq_params.allow_sr_entry = false; 9215 } 9216 } 9217 9218 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 9219 struct drm_device *dev, 9220 struct amdgpu_display_manager *dm, 9221 struct drm_crtc *pcrtc, 9222 bool wait_for_vblank) 9223 { 9224 u32 i; 9225 u64 timestamp_ns = ktime_get_ns(); 9226 struct drm_plane *plane; 9227 struct drm_plane_state *old_plane_state, *new_plane_state; 9228 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 9229 struct drm_crtc_state *new_pcrtc_state = 9230 drm_atomic_get_new_crtc_state(state, pcrtc); 9231 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 9232 struct dm_crtc_state *dm_old_crtc_state = 9233 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 9234 int planes_count = 0, vpos, hpos; 9235 unsigned long flags; 9236 u32 target_vblank, last_flip_vblank; 9237 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); 9238 bool cursor_update = false; 9239 bool pflip_present = false; 9240 bool dirty_rects_changed = false; 9241 bool updated_planes_and_streams = false; 9242 struct { 9243 struct dc_surface_update surface_updates[MAX_SURFACES]; 9244 struct dc_plane_info plane_infos[MAX_SURFACES]; 9245 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 9246 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 9247 struct dc_stream_update stream_update; 9248 } *bundle; 9249 9250 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 9251 9252 if (!bundle) { 9253 drm_err(dev, "Failed to allocate update bundle\n"); 9254 goto cleanup; 9255 } 9256 9257 /* 9258 * Disable the cursor first if we're disabling all the planes. 9259 * It'll remain on the screen after the planes are re-enabled 9260 * if we don't. 9261 * 9262 * If the cursor is transitioning from native to overlay mode, the 9263 * native cursor needs to be disabled first. 
9264 */ 9265 if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE && 9266 dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { 9267 struct dc_cursor_position cursor_position = {0}; 9268 9269 if (!dc_stream_set_cursor_position(acrtc_state->stream, 9270 &cursor_position)) 9271 drm_err(dev, "DC failed to disable native cursor\n"); 9272 9273 bundle->stream_update.cursor_position = 9274 &acrtc_state->stream->cursor_position; 9275 } 9276 9277 if (acrtc_state->active_planes == 0 && 9278 dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) 9279 amdgpu_dm_commit_cursors(state); 9280 9281 /* update planes when needed */ 9282 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 9283 struct drm_crtc *crtc = new_plane_state->crtc; 9284 struct drm_crtc_state *new_crtc_state; 9285 struct drm_framebuffer *fb = new_plane_state->fb; 9286 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb; 9287 bool plane_needs_flip; 9288 struct dc_plane_state *dc_plane; 9289 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 9290 9291 /* Cursor plane is handled after stream updates */ 9292 if (plane->type == DRM_PLANE_TYPE_CURSOR && 9293 acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { 9294 if ((fb && crtc == pcrtc) || 9295 (old_plane_state->fb && old_plane_state->crtc == pcrtc)) { 9296 cursor_update = true; 9297 if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) != 0) 9298 amdgpu_dm_update_cursor(plane, old_plane_state, &bundle->stream_update); 9299 } 9300 9301 continue; 9302 } 9303 9304 if (!fb || !crtc || pcrtc != crtc) 9305 continue; 9306 9307 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 9308 if (!new_crtc_state->active) 9309 continue; 9310 9311 dc_plane = dm_new_plane_state->dc_state; 9312 if (!dc_plane) 9313 continue; 9314 9315 bundle->surface_updates[planes_count].surface = dc_plane; 9316 if (new_pcrtc_state->color_mgmt_changed) { 9317 bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction; 9318 bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func; 9319 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; 9320 bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult; 9321 bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func; 9322 bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func; 9323 bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf; 9324 } 9325 9326 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, 9327 &bundle->scaling_infos[planes_count]); 9328 9329 bundle->surface_updates[planes_count].scaling_info = 9330 &bundle->scaling_infos[planes_count]; 9331 9332 plane_needs_flip = old_plane_state->fb && new_plane_state->fb; 9333 9334 pflip_present = pflip_present || plane_needs_flip; 9335 9336 if (!plane_needs_flip) { 9337 planes_count += 1; 9338 continue; 9339 } 9340 9341 fill_dc_plane_info_and_addr( 9342 dm->adev, new_plane_state, 9343 afb->tiling_flags, 9344 &bundle->plane_infos[planes_count], 9345 &bundle->flip_addrs[planes_count].address, 9346 afb->tmz_surface); 9347 9348 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n", 9349 new_plane_state->plane->index, 9350 bundle->plane_infos[planes_count].dcc.enable); 9351 9352 bundle->surface_updates[planes_count].plane_info = 9353 &bundle->plane_infos[planes_count]; 9354 9355 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled || 9356 
		    acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
			fill_dc_dirty_rects(plane, old_plane_state,
					    new_plane_state, new_crtc_state,
					    &bundle->flip_addrs[planes_count],
					    acrtc_state->stream->link->psr_settings.psr_version ==
					    DC_PSR_VERSION_SU_1,
					    &dirty_rects_changed);

			/*
			 * If the dirty regions changed, PSR-SU needs to be disabled
			 * temporarily and re-enabled once the dirty regions are stable,
			 * to avoid video glitches.
			 * PSR-SU will be re-enabled in vblank_control_worker() if the
			 * user pauses the video while PSR-SU is disabled.
			 */
			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
			    acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
			    dirty_rects_changed) {
				mutex_lock(&dm->dc_lock);
				acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
					timestamp_ns;
				if (acrtc_state->stream->link->psr_settings.psr_allow_active)
					amdgpu_dm_psr_disable(acrtc_state->stream, true);
				mutex_unlock(&dm->dc_lock);
			}
		}

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change memory domain, FB pitch, DCC state, rotation or
		 * mirroring.
		 *
		 * dm_crtc_helper_atomic_check() only accepts async flips with
		 * fast updates.
		 */
		if (crtc->state->async_flip &&
		    (acrtc_state->update_type != UPDATE_TYPE_FAST ||
		     get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
			drm_warn_once(state->dev,
				      "[PLANE:%d:%s] async flip with non-fast update\n",
				      plane->base.id, plane->name);

		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST &&
			get_mem_type(old_plane_state->fb) == get_mem_type(fb);

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			drm_err(dev, "No surface for CRTC: id=%d\n",
				acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
			      __func__,
			      bundle->flip_addrs[planes_count].address.grph.addr.high_part,
			      bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
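			 * In this mode the target is simply the vblank count
			 * sampled at submission time, plus one whenever we must
			 * wait for a vblank.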
9440 */ 9441 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); 9442 } else { 9443 /* For variable refresh rate mode only: 9444 * Get vblank of last completed flip to avoid > 1 vrr 9445 * flips per video frame by use of throttling, but allow 9446 * flip programming anywhere in the possibly large 9447 * variable vrr vblank interval for fine-grained flip 9448 * timing control and more opportunity to avoid stutter 9449 * on late submission of flips. 9450 */ 9451 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9452 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; 9453 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9454 } 9455 9456 target_vblank = last_flip_vblank + wait_for_vblank; 9457 9458 /* 9459 * Wait until we're out of the vertical blank period before the one 9460 * targeted by the flip 9461 */ 9462 while ((acrtc_attach->enabled && 9463 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 9464 0, &vpos, &hpos, NULL, 9465 NULL, &pcrtc->hwmode) 9466 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 9467 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 9468 (int)(target_vblank - 9469 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { 9470 usleep_range(1000, 1100); 9471 } 9472 9473 /** 9474 * Prepare the flip event for the pageflip interrupt to handle. 9475 * 9476 * This only works in the case where we've already turned on the 9477 * appropriate hardware blocks (eg. HUBP) so in the transition case 9478 * from 0 -> n planes we have to skip a hardware generated event 9479 * and rely on sending it from software. 9480 */ 9481 if (acrtc_attach->base.state->event && 9482 acrtc_state->active_planes > 0) { 9483 drm_crtc_vblank_get(pcrtc); 9484 9485 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9486 9487 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); 9488 prepare_flip_isr(acrtc_attach); 9489 9490 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9491 } 9492 9493 if (acrtc_state->stream) { 9494 if (acrtc_state->freesync_vrr_info_changed) 9495 bundle->stream_update.vrr_infopacket = 9496 &acrtc_state->stream->vrr_infopacket; 9497 } 9498 } else if (cursor_update && acrtc_state->active_planes > 0) { 9499 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9500 if (acrtc_attach->base.state->event) { 9501 drm_crtc_vblank_get(pcrtc); 9502 acrtc_attach->event = acrtc_attach->base.state->event; 9503 acrtc_attach->base.state->event = NULL; 9504 } 9505 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9506 } 9507 9508 /* Update the planes if changed or disable if we don't have any. */ 9509 if ((planes_count || acrtc_state->active_planes == 0) && 9510 acrtc_state->stream) { 9511 /* 9512 * If PSR or idle optimizations are enabled then flush out 9513 * any pending work before hardware programming. 9514 */ 9515 if (dm->vblank_control_workqueue) 9516 flush_workqueue(dm->vblank_control_workqueue); 9517 9518 bundle->stream_update.stream = acrtc_state->stream; 9519 if (new_pcrtc_state->mode_changed) { 9520 bundle->stream_update.src = acrtc_state->stream->src; 9521 bundle->stream_update.dst = acrtc_state->stream->dst; 9522 } 9523 9524 if (new_pcrtc_state->color_mgmt_changed) { 9525 /* 9526 * TODO: This isn't fully correct since we've actually 9527 * already modified the stream in place. 
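			 * The pointers below just tell DC which of the
			 * already-updated stream fields still need to be
			 * programmed to hardware.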
9528 */ 9529 bundle->stream_update.gamut_remap = 9530 &acrtc_state->stream->gamut_remap_matrix; 9531 bundle->stream_update.output_csc_transform = 9532 &acrtc_state->stream->csc_color_matrix; 9533 bundle->stream_update.out_transfer_func = 9534 &acrtc_state->stream->out_transfer_func; 9535 bundle->stream_update.lut3d_func = 9536 (struct dc_3dlut *) acrtc_state->stream->lut3d_func; 9537 bundle->stream_update.func_shaper = 9538 (struct dc_transfer_func *) acrtc_state->stream->func_shaper; 9539 } 9540 9541 acrtc_state->stream->abm_level = acrtc_state->abm_level; 9542 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) 9543 bundle->stream_update.abm_level = &acrtc_state->abm_level; 9544 9545 mutex_lock(&dm->dc_lock); 9546 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) { 9547 if (acrtc_state->stream->link->replay_settings.replay_allow_active) 9548 amdgpu_dm_replay_disable(acrtc_state->stream); 9549 if (acrtc_state->stream->link->psr_settings.psr_allow_active) 9550 amdgpu_dm_psr_disable(acrtc_state->stream, true); 9551 } 9552 mutex_unlock(&dm->dc_lock); 9553 9554 /* 9555 * If FreeSync state on the stream has changed then we need to 9556 * re-adjust the min/max bounds now that DC doesn't handle this 9557 * as part of commit. 9558 */ 9559 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { 9560 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9561 dc_stream_adjust_vmin_vmax( 9562 dm->dc, acrtc_state->stream, 9563 &acrtc_attach->dm_irq_params.vrr_params.adjust); 9564 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9565 } 9566 mutex_lock(&dm->dc_lock); 9567 update_planes_and_stream_adapter(dm->dc, 9568 acrtc_state->update_type, 9569 planes_count, 9570 acrtc_state->stream, 9571 &bundle->stream_update, 9572 bundle->surface_updates); 9573 updated_planes_and_streams = true; 9574 9575 /** 9576 * Enable or disable the interrupts on the backend. 9577 * 9578 * Most pipes are put into power gating when unused. 9579 * 9580 * When power gating is enabled on a pipe we lose the 9581 * interrupt enablement state when power gating is disabled. 9582 * 9583 * So we need to update the IRQ control state in hardware 9584 * whenever the pipe turns on (since it could be previously 9585 * power gated) or off (since some pipes can't be power gated 9586 * on some ASICs). 9587 */ 9588 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) 9589 dm_update_pflip_irq_state(drm_to_adev(dev), 9590 acrtc_attach); 9591 9592 amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns); 9593 mutex_unlock(&dm->dc_lock); 9594 } 9595 9596 /* 9597 * Update cursor state *after* programming all the planes. 9598 * This avoids redundant programming in the case where we're going 9599 * to be disabling a single plane - those pipes are being disabled. 
9600 */ 9601 if (acrtc_state->active_planes && 9602 (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) && 9603 acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) 9604 amdgpu_dm_commit_cursors(state); 9605 9606 cleanup: 9607 kfree(bundle); 9608 } 9609 9610 static void amdgpu_dm_commit_audio(struct drm_device *dev, 9611 struct drm_atomic_state *state) 9612 { 9613 struct amdgpu_device *adev = drm_to_adev(dev); 9614 struct amdgpu_dm_connector *aconnector; 9615 struct drm_connector *connector; 9616 struct drm_connector_state *old_con_state, *new_con_state; 9617 struct drm_crtc_state *new_crtc_state; 9618 struct dm_crtc_state *new_dm_crtc_state; 9619 const struct dc_stream_status *status; 9620 int i, inst; 9621 9622 /* Notify device removals. */ 9623 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9624 if (old_con_state->crtc != new_con_state->crtc) { 9625 /* CRTC changes require notification. */ 9626 goto notify; 9627 } 9628 9629 if (!new_con_state->crtc) 9630 continue; 9631 9632 new_crtc_state = drm_atomic_get_new_crtc_state( 9633 state, new_con_state->crtc); 9634 9635 if (!new_crtc_state) 9636 continue; 9637 9638 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9639 continue; 9640 9641 notify: 9642 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 9643 continue; 9644 9645 aconnector = to_amdgpu_dm_connector(connector); 9646 9647 mutex_lock(&adev->dm.audio_lock); 9648 inst = aconnector->audio_inst; 9649 aconnector->audio_inst = -1; 9650 mutex_unlock(&adev->dm.audio_lock); 9651 9652 amdgpu_dm_audio_eld_notify(adev, inst); 9653 } 9654 9655 /* Notify audio device additions. */ 9656 for_each_new_connector_in_state(state, connector, new_con_state, i) { 9657 if (!new_con_state->crtc) 9658 continue; 9659 9660 new_crtc_state = drm_atomic_get_new_crtc_state( 9661 state, new_con_state->crtc); 9662 9663 if (!new_crtc_state) 9664 continue; 9665 9666 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9667 continue; 9668 9669 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 9670 if (!new_dm_crtc_state->stream) 9671 continue; 9672 9673 status = dc_stream_get_status(new_dm_crtc_state->stream); 9674 if (!status) 9675 continue; 9676 9677 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 9678 continue; 9679 9680 aconnector = to_amdgpu_dm_connector(connector); 9681 9682 mutex_lock(&adev->dm.audio_lock); 9683 inst = status->audio_inst; 9684 aconnector->audio_inst = inst; 9685 mutex_unlock(&adev->dm.audio_lock); 9686 9687 amdgpu_dm_audio_eld_notify(adev, inst); 9688 } 9689 } 9690 9691 /* 9692 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC 9693 * @crtc_state: the DRM CRTC state 9694 * @stream_state: the DC stream state. 9695 * 9696 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring 9697 * a dc_stream_state's flags in sync with a drm_crtc_state's flags. 
9698 */ 9699 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, 9700 struct dc_stream_state *stream_state) 9701 { 9702 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); 9703 } 9704 9705 static void dm_clear_writeback(struct amdgpu_display_manager *dm, 9706 struct dm_crtc_state *crtc_state) 9707 { 9708 dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0); 9709 } 9710 9711 static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, 9712 struct dc_state *dc_state) 9713 { 9714 struct drm_device *dev = state->dev; 9715 struct amdgpu_device *adev = drm_to_adev(dev); 9716 struct amdgpu_display_manager *dm = &adev->dm; 9717 struct drm_crtc *crtc; 9718 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 9719 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 9720 struct drm_connector_state *old_con_state; 9721 struct drm_connector *connector; 9722 bool mode_set_reset_required = false; 9723 u32 i; 9724 struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; 9725 bool set_backlight_level = false; 9726 9727 /* Disable writeback */ 9728 for_each_old_connector_in_state(state, connector, old_con_state, i) { 9729 struct dm_connector_state *dm_old_con_state; 9730 struct amdgpu_crtc *acrtc; 9731 9732 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 9733 continue; 9734 9735 old_crtc_state = NULL; 9736 9737 dm_old_con_state = to_dm_connector_state(old_con_state); 9738 if (!dm_old_con_state->base.crtc) 9739 continue; 9740 9741 acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc); 9742 if (acrtc) 9743 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9744 9745 if (!acrtc || !acrtc->wb_enabled) 9746 continue; 9747 9748 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9749 9750 dm_clear_writeback(dm, dm_old_crtc_state); 9751 acrtc->wb_enabled = false; 9752 } 9753 9754 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 9755 new_crtc_state, i) { 9756 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9757 9758 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9759 9760 if (old_crtc_state->active && 9761 (!new_crtc_state->active || 9762 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 9763 manage_dm_interrupts(adev, acrtc, NULL); 9764 dc_stream_release(dm_old_crtc_state->stream); 9765 } 9766 } 9767 9768 drm_atomic_helper_calc_timestamping_constants(state); 9769 9770 /* update changed items */ 9771 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 9772 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9773 9774 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9775 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9776 9777 drm_dbg_state(state->dev, 9778 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 9779 acrtc->crtc_id, 9780 new_crtc_state->enable, 9781 new_crtc_state->active, 9782 new_crtc_state->planes_changed, 9783 new_crtc_state->mode_changed, 9784 new_crtc_state->active_changed, 9785 new_crtc_state->connectors_changed); 9786 9787 /* Disable cursor if disabling crtc */ 9788 if (old_crtc_state->active && !new_crtc_state->active) { 9789 struct dc_cursor_position position; 9790 9791 memset(&position, 0, sizeof(position)); 9792 mutex_lock(&dm->dc_lock); 9793 dc_exit_ips_for_hw_access(dm->dc); 9794 dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position); 9795 mutex_unlock(&dm->dc_lock); 9796 } 9797 9798 /* Copy all 
transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			drm_dbg_atomic(dev,
				       "Atomic commit: SET crtc id %d: [%p]\n",
				       acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * This can happen because of issues with the
				 * delivery of userspace notifications; in that
				 * case userspace tries to set a mode on a
				 * display which is in fact disconnected.
				 * dc_sink is NULL in this case on aconnector.
				 * We expect a mode reset to come soon.
				 *
				 * This can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
				drm_dbg_atomic(dev,
					       "Failed to create new stream for crtc %d\n",
					       acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
			mode_set_reset_required = true;
			set_backlight_level = true;
		} else if (modereset_required(new_crtc_state)) {
			drm_dbg_atomic(dev,
				       "Atomic commit: RESET. crtc id %d:[%p]\n",
				       acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			mode_set_reset_required = true;
		}
	} /* for_each_crtc_in_state() */

	/* If there was a mode set or reset, disable eDP PSR and Replay */
	if (mode_set_reset_required) {
		if (dm->vblank_control_workqueue)
			flush_workqueue(dm->vblank_control_workqueue);

		amdgpu_dm_replay_disable_all(dm);
		amdgpu_dm_psr_disable_all(dm);
	}

	dm_enable_per_frame_crtc_master_sync(dc_state);
	mutex_lock(&dm->dc_lock);
	dc_exit_ips_for_hw_access(dm->dc);
	WARN_ON(!dc_commit_streams(dm->dc, &params));

	/* Allow idle optimization when vblank count is 0 for display off */
	if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev))
		dc_allow_idle_optimizations(dm->dc, true);
	mutex_unlock(&dm->dc_lock);

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
				dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_state_get_stream_status(dc_state,
								    dm_new_crtc_state->stream);
			if (!status)
				drm_err(dev,
					"got no status for stream %p on acrtc%p\n",
					dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}

	/* During boot up and resume, the DC layer will reset the panel
	 * brightness to fix a flicker issue. This leaves dm->actual_brightness
	 * out of sync with the actual panel brightness
	 * level (dm->brightness holds the correct level), so we reapply the
	 * dm->brightness value after the mode set.
	 */
	if (set_backlight_level) {
		for (i = 0; i < dm->num_of_edps; i++) {
			if (dm->backlight_dev[i])
				amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
		}
	}
}

static void dm_set_writeback(struct amdgpu_display_manager *dm,
			     struct dm_crtc_state *crtc_state,
			     struct drm_connector *connector,
			     struct drm_connector_state *new_con_state)
{
	struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc;
	struct dc_writeback_info *wb_info;
	struct pipe_ctx *pipe = NULL;
	struct amdgpu_framebuffer *afb;
	int i = 0;

	wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
	if (!wb_info) {
		drm_err(adev_to_drm(adev), "Failed to allocate wb_info\n");
		return;
	}

	acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
	if (!acrtc) {
		drm_err(adev_to_drm(adev), "no amdgpu_crtc found\n");
		kfree(wb_info);
		return;
	}

	afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
	if (!afb) {
		drm_err(adev_to_drm(adev), "No amdgpu_framebuffer found\n");
		kfree(wb_info);
		return;
	}

	for (i = 0; i < MAX_PIPES; i++) {
		if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
			pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	/* Bail out rather than dereference a NULL pipe below. */
	if (!pipe) {
		drm_err(adev_to_drm(adev), "no pipe found for writeback stream\n");
		kfree(wb_info);
		return;
	}

	/* fill in wb_info */
	wb_info->wb_enabled = true;

	wb_info->dwb_pipe_inst = 0;
	wb_info->dwb_params.dwbscl_black_color = 0;
	wb_info->dwb_params.hdr_mult = 0x1F000;
	wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
	wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
	wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
	wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;

	/* width & height from crtc */
	wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
	wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
	wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
	wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;

	wb_info->dwb_params.cnv_params.crop_en = false;
	wb_info->dwb_params.stereo_params.stereo_enabled = false;

	wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff;	// 10 bits
	wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
	wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
	wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;

	wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;

	wb_info->dwb_params.capture_rate = dwb_capture_rate_0;

	wb_info->dwb_params.scaler_taps.h_taps = 4;
	wb_info->dwb_params.scaler_taps.v_taps = 4;
	wb_info->dwb_params.scaler_taps.h_taps_c = 2;
	wb_info->dwb_params.scaler_taps.v_taps_c = 2;
	wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;

	wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
	wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];

	for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
		wb_info->mcif_buf_params.luma_address[i] = afb->address;
		wb_info->mcif_buf_params.chroma_address[i]
= 0;
	}

	wb_info->mcif_buf_params.p_vmid = 1;
	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) {
		wb_info->mcif_warmup_params.start_address.quad_part = afb->address;
		wb_info->mcif_warmup_params.region_size =
			wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height;
	}
	wb_info->mcif_warmup_params.p_vmid = 1;
	wb_info->writeback_source_plane = pipe->plane_state;

	dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);

	acrtc->wb_pending = true;
	acrtc->wb_conn = wb_conn;
	drm_writeback_queue_job(wb_conn, new_con_state);
}

/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL;
	u32 i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;

	trace_amdgpu_dm_atomic_commit_tail_begin(state);

	drm_atomic_helper_update_legacy_modeset_state(dev, state);
	drm_dp_mst_atomic_wait_for_dependencies(state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
		amdgpu_dm_commit_streams(state, dc_state);
	}

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector;

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		if (!adev->dm.hdcp_workqueue)
			continue;

		pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);

		if (!connector)
			continue;

		pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
			 connector->index, connector->status, connector->dpms);
		pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
			 old_con_state->content_protection, new_con_state->content_protection);

		if (aconnector->dc_sink) {
			if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
			    aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
				pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
					 aconnector->dc_sink->edid_caps.display_name);
			}
		}

		new_crtc_state = NULL;
		old_crtc_state = NULL;

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		if (old_crtc_state)
			pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
				 old_crtc_state->enable,
				 old_crtc_state->active,
				 old_crtc_state->mode_changed,
				 old_crtc_state->active_changed,
				 old_crtc_state->connectors_changed);

		if (new_crtc_state)
			pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
				 new_crtc_state->enable,
				 new_crtc_state->active,
				 new_crtc_state->mode_changed,
				 new_crtc_state->active_changed,
				 new_crtc_state->connectors_changed);
	}

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		if (!adev->dm.hdcp_workqueue)
			continue;

		new_crtc_state = NULL;
		old_crtc_state = NULL;

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
						    old_con_state, connector, adev->dm.hdcp_workqueue)) {
			/* When a display is unplugged from an MST hub, the connector will
			 * be destroyed within dm_dp_mst_connector_destroy. The connector's
			 * hdcp properties, like type, undesired, desired, enabled,
			 * will be lost. So, save the hdcp properties into hdcp_work within
			 * amdgpu_dm_atomic_commit_tail;
if the same display is 10136 * plugged back with same display index, its hdcp properties 10137 * will be retrieved from hdcp_work within dm_dp_mst_get_modes 10138 */ 10139 10140 bool enable_encryption = false; 10141 10142 if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) 10143 enable_encryption = true; 10144 10145 if (aconnector->dc_link && aconnector->dc_sink && 10146 aconnector->dc_link->type == dc_connection_mst_branch) { 10147 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; 10148 struct hdcp_workqueue *hdcp_w = 10149 &hdcp_work[aconnector->dc_link->link_index]; 10150 10151 hdcp_w->hdcp_content_type[connector->index] = 10152 new_con_state->hdcp_content_type; 10153 hdcp_w->content_protection[connector->index] = 10154 new_con_state->content_protection; 10155 } 10156 10157 if (new_crtc_state && new_crtc_state->mode_changed && 10158 new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) 10159 enable_encryption = true; 10160 10161 drm_info(adev_to_drm(adev), "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); 10162 10163 if (aconnector->dc_link) 10164 hdcp_update_display( 10165 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, 10166 new_con_state->hdcp_content_type, enable_encryption); 10167 } 10168 } 10169 10170 /* Handle connector state changes */ 10171 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 10172 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 10173 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 10174 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 10175 struct dc_surface_update *dummy_updates; 10176 struct dc_stream_update stream_update; 10177 struct dc_info_packet hdr_packet; 10178 struct dc_stream_status *status = NULL; 10179 bool abm_changed, hdr_changed, scaling_changed, output_color_space_changed = false; 10180 10181 memset(&stream_update, 0, sizeof(stream_update)); 10182 10183 if (acrtc) { 10184 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 10185 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 10186 } 10187 10188 /* Skip any modesets/resets */ 10189 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) 10190 continue; 10191 10192 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10193 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10194 10195 scaling_changed = is_scaling_state_different(dm_new_con_state, 10196 dm_old_con_state); 10197 10198 if ((new_con_state->hdmi.broadcast_rgb != old_con_state->hdmi.broadcast_rgb) && 10199 (dm_old_crtc_state->stream->output_color_space != 10200 get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state))) 10201 output_color_space_changed = true; 10202 10203 abm_changed = dm_new_crtc_state->abm_level != 10204 dm_old_crtc_state->abm_level; 10205 10206 hdr_changed = 10207 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state); 10208 10209 if (!scaling_changed && !abm_changed && !hdr_changed && !output_color_space_changed) 10210 continue; 10211 10212 stream_update.stream = dm_new_crtc_state->stream; 10213 if (scaling_changed) { 10214 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, 10215 dm_new_con_state, dm_new_crtc_state->stream); 10216 10217 stream_update.src = dm_new_crtc_state->stream->src; 10218 stream_update.dst = dm_new_crtc_state->stream->dst; 10219 } 10220 10221 if 
(output_color_space_changed) { 10222 dm_new_crtc_state->stream->output_color_space 10223 = get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state); 10224 10225 stream_update.output_color_space = &dm_new_crtc_state->stream->output_color_space; 10226 } 10227 10228 if (abm_changed) { 10229 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; 10230 10231 stream_update.abm_level = &dm_new_crtc_state->abm_level; 10232 } 10233 10234 if (hdr_changed) { 10235 fill_hdr_info_packet(new_con_state, &hdr_packet); 10236 stream_update.hdr_static_metadata = &hdr_packet; 10237 } 10238 10239 status = dc_stream_get_status(dm_new_crtc_state->stream); 10240 10241 if (WARN_ON(!status)) 10242 continue; 10243 10244 WARN_ON(!status->plane_count); 10245 10246 /* 10247 * TODO: DC refuses to perform stream updates without a dc_surface_update. 10248 * Here we create an empty update on each plane. 10249 * To fix this, DC should permit updating only stream properties. 10250 */ 10251 dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); 10252 if (!dummy_updates) { 10253 drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n"); 10254 continue; 10255 } 10256 for (j = 0; j < status->plane_count; j++) 10257 dummy_updates[j].surface = status->plane_states[0]; 10258 10259 sort(dummy_updates, status->plane_count, 10260 sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL); 10261 10262 mutex_lock(&dm->dc_lock); 10263 dc_exit_ips_for_hw_access(dm->dc); 10264 dc_update_planes_and_stream(dm->dc, 10265 dummy_updates, 10266 status->plane_count, 10267 dm_new_crtc_state->stream, 10268 &stream_update); 10269 mutex_unlock(&dm->dc_lock); 10270 kfree(dummy_updates); 10271 } 10272 10273 /** 10274 * Enable interrupts for CRTCs that are newly enabled or went through 10275 * a modeset. It was intentionally deferred until after the front end 10276 * state was modified to wait until the OTG was on and so the IRQ 10277 * handlers didn't access stale or invalid state. 10278 */ 10279 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10280 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 10281 #ifdef CONFIG_DEBUG_FS 10282 enum amdgpu_dm_pipe_crc_source cur_crc_src; 10283 #endif 10284 /* Count number of newly disabled CRTCs for dropping PM refs later. 
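	 * The matching pm_runtime_put_autosuspend() calls are made at the
	 * end of this function.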
*/ 10285 if (old_crtc_state->active && !new_crtc_state->active) 10286 crtc_disable_count++; 10287 10288 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10289 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10290 10291 /* For freesync config update on crtc state and params for irq */ 10292 update_stream_irq_parameters(dm, dm_new_crtc_state); 10293 10294 #ifdef CONFIG_DEBUG_FS 10295 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10296 cur_crc_src = acrtc->dm_irq_params.crc_src; 10297 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10298 #endif 10299 10300 if (new_crtc_state->active && 10301 (!old_crtc_state->active || 10302 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 10303 dc_stream_retain(dm_new_crtc_state->stream); 10304 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; 10305 manage_dm_interrupts(adev, acrtc, dm_new_crtc_state); 10306 } 10307 /* Handle vrr on->off / off->on transitions */ 10308 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); 10309 10310 #ifdef CONFIG_DEBUG_FS 10311 if (new_crtc_state->active && 10312 (!old_crtc_state->active || 10313 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 10314 /** 10315 * Frontend may have changed so reapply the CRC capture 10316 * settings for the stream. 10317 */ 10318 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { 10319 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 10320 if (amdgpu_dm_crc_window_is_activated(crtc)) { 10321 uint8_t cnt; 10322 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10323 for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) { 10324 if (acrtc->dm_irq_params.window_param[cnt].enable) { 10325 acrtc->dm_irq_params.window_param[cnt].update_win = true; 10326 10327 /** 10328 * It takes 2 frames for HW to stably generate CRC when 10329 * resuming from suspend, so we set skip_frame_cnt 2. 10330 */ 10331 acrtc->dm_irq_params.window_param[cnt].skip_frame_cnt = 2; 10332 } 10333 } 10334 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10335 } 10336 #endif 10337 if (amdgpu_dm_crtc_configure_crc_source( 10338 crtc, dm_new_crtc_state, cur_crc_src)) 10339 drm_dbg_atomic(dev, "Failed to configure crc source"); 10340 } 10341 } 10342 #endif 10343 } 10344 10345 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) 10346 if (new_crtc_state->async_flip) 10347 wait_for_vblank = false; 10348 10349 /* update planes when needed per crtc*/ 10350 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { 10351 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10352 10353 if (dm_new_crtc_state->stream) 10354 amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); 10355 } 10356 10357 /* Enable writeback */ 10358 for_each_new_connector_in_state(state, connector, new_con_state, i) { 10359 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 10360 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 10361 10362 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 10363 continue; 10364 10365 if (!new_con_state->writeback_job) 10366 continue; 10367 10368 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 10369 10370 if (!new_crtc_state) 10371 continue; 10372 10373 if (acrtc->wb_enabled) 10374 continue; 10375 10376 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10377 10378 dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state); 10379 acrtc->wb_enabled = true; 10380 } 10381 10382 /* Update audio instances for each connector. 
*/ 10383 amdgpu_dm_commit_audio(dev, state); 10384 10385 /* restore the backlight level */ 10386 for (i = 0; i < dm->num_of_edps; i++) { 10387 if (dm->backlight_dev[i] && 10388 (dm->actual_brightness[i] != dm->brightness[i])) 10389 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 10390 } 10391 10392 /* 10393 * send vblank event on all events not handled in flip and 10394 * mark consumed event for drm_atomic_helper_commit_hw_done 10395 */ 10396 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10397 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 10398 10399 if (new_crtc_state->event) 10400 drm_send_event_locked(dev, &new_crtc_state->event->base); 10401 10402 new_crtc_state->event = NULL; 10403 } 10404 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10405 10406 /* Signal HW programming completion */ 10407 drm_atomic_helper_commit_hw_done(state); 10408 10409 if (wait_for_vblank) 10410 drm_atomic_helper_wait_for_flip_done(dev, state); 10411 10412 drm_atomic_helper_cleanup_planes(dev, state); 10413 10414 /* Don't free the memory if we are hitting this as part of suspend. 10415 * This way we don't free any memory during suspend; see 10416 * amdgpu_bo_free_kernel(). The memory will be freed in the first 10417 * non-suspend modeset or when the driver is torn down. 10418 */ 10419 if (!adev->in_suspend) { 10420 /* return the stolen vga memory back to VRAM */ 10421 if (!adev->mman.keep_stolen_vga_memory) 10422 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); 10423 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); 10424 } 10425 10426 /* 10427 * Finally, drop a runtime PM reference for each newly disabled CRTC, 10428 * so we can put the GPU into runtime suspend if we're not driving any 10429 * displays anymore 10430 */ 10431 for (i = 0; i < crtc_disable_count; i++) 10432 pm_runtime_put_autosuspend(dev->dev); 10433 pm_runtime_mark_last_busy(dev->dev); 10434 10435 trace_amdgpu_dm_atomic_commit_tail_finish(state); 10436 } 10437 10438 static int dm_force_atomic_commit(struct drm_connector *connector) 10439 { 10440 int ret = 0; 10441 struct drm_device *ddev = connector->dev; 10442 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); 10443 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 10444 struct drm_plane *plane = disconnected_acrtc->base.primary; 10445 struct drm_connector_state *conn_state; 10446 struct drm_crtc_state *crtc_state; 10447 struct drm_plane_state *plane_state; 10448 10449 if (!state) 10450 return -ENOMEM; 10451 10452 state->acquire_ctx = ddev->mode_config.acquire_ctx; 10453 10454 /* Construct an atomic state to restore previous display setting */ 10455 10456 /* 10457 * Attach connectors to drm_atomic_state 10458 */ 10459 conn_state = drm_atomic_get_connector_state(state, connector); 10460 10461 /* Check for error in getting connector state */ 10462 if (IS_ERR(conn_state)) { 10463 ret = PTR_ERR(conn_state); 10464 goto out; 10465 } 10466 10467 /* Attach crtc to drm_atomic_state*/ 10468 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); 10469 10470 /* Check for error in getting crtc state */ 10471 if (IS_ERR(crtc_state)) { 10472 ret = PTR_ERR(crtc_state); 10473 goto out; 10474 } 10475 10476 /* force a restore */ 10477 crtc_state->mode_changed = true; 10478 10479 /* Attach plane to drm_atomic_state */ 10480 plane_state = drm_atomic_get_plane_state(state, plane); 10481 10482 /* Check for error in getting plane state */ 10483 if 
(IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto out;
	}

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);

out:
	drm_atomic_state_put(state);
	if (ret)
		drm_err(ddev, "Restoring old state failed with %i\n", ret);

	return ret;
}

/*
 * This function handles all cases when a set mode does not come upon a
 * hotplug. This includes when a display is unplugged and then plugged back
 * into the same port, and when running without usermode desktop manager
 * support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
		return;

	aconnector = to_amdgpu_dm_connector(connector);

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce that we are in a state where we cannot rely
	 * on a usermode call to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases them, the extra locks we take here are
	 * released as well.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			drm_err(dev, "[CRTC:%d:%s] hw_done or flip_done timed out\n",
				crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ?
ret : 0; 10585 } 10586 10587 static void get_freesync_config_for_crtc( 10588 struct dm_crtc_state *new_crtc_state, 10589 struct dm_connector_state *new_con_state) 10590 { 10591 struct mod_freesync_config config = {0}; 10592 struct amdgpu_dm_connector *aconnector; 10593 struct drm_display_mode *mode = &new_crtc_state->base.mode; 10594 int vrefresh = drm_mode_vrefresh(mode); 10595 bool fs_vid_mode = false; 10596 10597 if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 10598 return; 10599 10600 aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); 10601 10602 new_crtc_state->vrr_supported = new_con_state->freesync_capable && 10603 vrefresh >= aconnector->min_vfreq && 10604 vrefresh <= aconnector->max_vfreq; 10605 10606 if (new_crtc_state->vrr_supported) { 10607 new_crtc_state->stream->ignore_msa_timing_param = true; 10608 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; 10609 10610 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000; 10611 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; 10612 config.vsif_supported = true; 10613 config.btr = true; 10614 10615 if (fs_vid_mode) { 10616 config.state = VRR_STATE_ACTIVE_FIXED; 10617 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz; 10618 goto out; 10619 } else if (new_crtc_state->base.vrr_enabled) { 10620 config.state = VRR_STATE_ACTIVE_VARIABLE; 10621 } else { 10622 config.state = VRR_STATE_INACTIVE; 10623 } 10624 } 10625 out: 10626 new_crtc_state->freesync_config = config; 10627 } 10628 10629 static void reset_freesync_config_for_crtc( 10630 struct dm_crtc_state *new_crtc_state) 10631 { 10632 new_crtc_state->vrr_supported = false; 10633 10634 memset(&new_crtc_state->vrr_infopacket, 0, 10635 sizeof(new_crtc_state->vrr_infopacket)); 10636 } 10637 10638 static bool 10639 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, 10640 struct drm_crtc_state *new_crtc_state) 10641 { 10642 const struct drm_display_mode *old_mode, *new_mode; 10643 10644 if (!old_crtc_state || !new_crtc_state) 10645 return false; 10646 10647 old_mode = &old_crtc_state->mode; 10648 new_mode = &new_crtc_state->mode; 10649 10650 if (old_mode->clock == new_mode->clock && 10651 old_mode->hdisplay == new_mode->hdisplay && 10652 old_mode->vdisplay == new_mode->vdisplay && 10653 old_mode->htotal == new_mode->htotal && 10654 old_mode->vtotal != new_mode->vtotal && 10655 old_mode->hsync_start == new_mode->hsync_start && 10656 old_mode->vsync_start != new_mode->vsync_start && 10657 old_mode->hsync_end == new_mode->hsync_end && 10658 old_mode->vsync_end != new_mode->vsync_end && 10659 old_mode->hskew == new_mode->hskew && 10660 old_mode->vscan == new_mode->vscan && 10661 (old_mode->vsync_end - old_mode->vsync_start) == 10662 (new_mode->vsync_end - new_mode->vsync_start)) 10663 return true; 10664 10665 return false; 10666 } 10667 10668 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) 10669 { 10670 u64 num, den, res; 10671 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; 10672 10673 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; 10674 10675 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000; 10676 den = (unsigned long long)new_crtc_state->mode.htotal * 10677 (unsigned long long)new_crtc_state->mode.vtotal; 10678 10679 res = div_u64(num, den); 10680 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res; 10681 } 10682 10683 static int 
dm_update_crtc_state(struct amdgpu_display_manager *dm,
		     struct drm_atomic_state *state,
		     struct drm_crtc *crtc,
		     struct drm_crtc_state *old_crtc_state,
		     struct drm_crtc_state *new_crtc_state,
		     bool enable,
		     bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	struct amdgpu_device *adev = dm->adev;
	int ret = 0;

	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
	 * dc_validation_set, and update changed items there.
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_connector *connector = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
	if (connector)
		aconnector = to_amdgpu_dm_connector(connector);

	/* TODO: This hack should go away */
	if (connector && enable) {
		/* Make sure the fake sink is created in the plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									connector);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									connector);

		if (WARN_ON(!drm_new_conn_state)) {
			ret = -EINVAL;
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(connector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * We can have no stream on ACTION_SET if a display was
		 * disconnected during S3; in this case it is not an error.
		 * The OS will be updated after detection and will do the
		 * right thing on the next atomic commit.
		 */
		if (!new_stream) {
			drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new stream for crtc %d\n",
				       __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
10776 */ 10777 if (amdgpu_freesync_vid_mode && 10778 dm_new_crtc_state->stream && 10779 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) 10780 goto skip_modeset; 10781 10782 if (dm_new_crtc_state->stream && 10783 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 10784 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 10785 new_crtc_state->mode_changed = false; 10786 drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d", 10787 new_crtc_state->mode_changed); 10788 } 10789 } 10790 10791 /* mode_changed flag may get updated above, need to check again */ 10792 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10793 goto skip_modeset; 10794 10795 drm_dbg_state(state->dev, 10796 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 10797 acrtc->crtc_id, 10798 new_crtc_state->enable, 10799 new_crtc_state->active, 10800 new_crtc_state->planes_changed, 10801 new_crtc_state->mode_changed, 10802 new_crtc_state->active_changed, 10803 new_crtc_state->connectors_changed); 10804 10805 /* Remove stream for any changed/disabled CRTC */ 10806 if (!enable) { 10807 10808 if (!dm_old_crtc_state->stream) 10809 goto skip_modeset; 10810 10811 /* Unset freesync video if it was active before */ 10812 if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { 10813 dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; 10814 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; 10815 } 10816 10817 /* Now check if we should set freesync video mode */ 10818 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && 10819 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 10820 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && 10821 is_timing_unchanged_for_freesync(new_crtc_state, 10822 old_crtc_state)) { 10823 new_crtc_state->mode_changed = false; 10824 drm_dbg_driver(adev_to_drm(adev), 10825 "Mode change not required for front porch change, setting mode_changed to %d", 10826 new_crtc_state->mode_changed); 10827 10828 set_freesync_fixed_config(dm_new_crtc_state); 10829 10830 goto skip_modeset; 10831 } else if (amdgpu_freesync_vid_mode && aconnector && 10832 is_freesync_video_mode(&new_crtc_state->mode, 10833 aconnector)) { 10834 struct drm_display_mode *high_mode; 10835 10836 high_mode = get_highest_refresh_rate_mode(aconnector, false); 10837 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) 10838 set_freesync_fixed_config(dm_new_crtc_state); 10839 } 10840 10841 ret = dm_atomic_get_state(state, &dm_state); 10842 if (ret) 10843 goto fail; 10844 10845 drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n", 10846 crtc->base.id); 10847 10848 /* i.e. 
reset mode */
		if (dc_state_remove_stream(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when newly
		 * added MST connectors are not found in the existing crtc_state
		 * in chained (daisy-chain) mode.
		 * TODO: need to dig out the root cause of that
		 */
		if (!connector)
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
						    dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_state_add_stream(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && connector && new_crtc_state->active))
		return 0;
	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (the stream has just
	 *    been added to the dc context, or is already in it),
	 * 2. The CRTC has a valid connector attached, and
	 * 3. It is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings.
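	 * get_freesync_config_for_crtc() derives the supported VRR range from
	 * the connector's reported min/max refresh rate (e.g. a 48-144 Hz
	 * panel yields min/max_refresh_in_uhz of 48 000 000/144 000 000) and
	 * stores it in freesync_config, from which the vblank IRQ parameters
	 * are later rebuilt during commit.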
	 */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}

static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state;
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	int i;

	/*
	 * TODO: Remove this hack for all asics once it proves that fast
	 * updates work fine on DCN3.2+.
	 */
	if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) &&
	    state->allow_modeset)
		return true;

	if (amdgpu_in_reset(adev) && state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
	old_crtc_state =
		drm_atomic_get_old_crtc_state(state, old_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/*
	 * A change in cursor mode means a new dc pipe needs to be acquired or
	 * released from the state
	 */
	old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
	new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    old_dm_crtc_state != NULL &&
	    old_dm_crtc_state->cursor_mode != new_dm_crtc_state->cursor_mode) {
		return true;
	}

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	/*
	 * On zpos change, planes need to be reordered by removing and re-adding
	 * them one by one to the dc state, in order of descending zpos.
	 *
	 * TODO: We can likely skip bandwidth validation if the only thing that
	 * changed about the plane was its z-ordering.
	 */
	if (old_plane_state->normalized_zpos != new_plane_state->normalized_zpos)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
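	 *
	 * For example, adding or removing an overlay plane can change which
	 * DC pipe each remaining plane is assigned to, so every plane is
	 * removed and re-added rather than updated in place.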
11033 */ 11034 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { 11035 struct amdgpu_framebuffer *old_afb, *new_afb; 11036 struct dm_plane_state *dm_new_other_state, *dm_old_other_state; 11037 11038 dm_new_other_state = to_dm_plane_state(new_other_state); 11039 dm_old_other_state = to_dm_plane_state(old_other_state); 11040 11041 if (other->type == DRM_PLANE_TYPE_CURSOR) 11042 continue; 11043 11044 if (old_other_state->crtc != new_plane_state->crtc && 11045 new_other_state->crtc != new_plane_state->crtc) 11046 continue; 11047 11048 if (old_other_state->crtc != new_other_state->crtc) 11049 return true; 11050 11051 /* Src/dst size and scaling updates. */ 11052 if (old_other_state->src_w != new_other_state->src_w || 11053 old_other_state->src_h != new_other_state->src_h || 11054 old_other_state->crtc_w != new_other_state->crtc_w || 11055 old_other_state->crtc_h != new_other_state->crtc_h) 11056 return true; 11057 11058 /* Rotation / mirroring updates. */ 11059 if (old_other_state->rotation != new_other_state->rotation) 11060 return true; 11061 11062 /* Blending updates. */ 11063 if (old_other_state->pixel_blend_mode != 11064 new_other_state->pixel_blend_mode) 11065 return true; 11066 11067 /* Alpha updates. */ 11068 if (old_other_state->alpha != new_other_state->alpha) 11069 return true; 11070 11071 /* Colorspace changes. */ 11072 if (old_other_state->color_range != new_other_state->color_range || 11073 old_other_state->color_encoding != new_other_state->color_encoding) 11074 return true; 11075 11076 /* HDR/Transfer Function changes. */ 11077 if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf || 11078 dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut || 11079 dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult || 11080 dm_old_other_state->ctm != dm_new_other_state->ctm || 11081 dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut || 11082 dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf || 11083 dm_old_other_state->lut3d != dm_new_other_state->lut3d || 11084 dm_old_other_state->blend_lut != dm_new_other_state->blend_lut || 11085 dm_old_other_state->blend_tf != dm_new_other_state->blend_tf) 11086 return true; 11087 11088 /* Framebuffer checks fall at the end. */ 11089 if (!old_other_state->fb || !new_other_state->fb) 11090 continue; 11091 11092 /* Pixel format changes can require bandwidth updates. */ 11093 if (old_other_state->fb->format != new_other_state->fb->format) 11094 return true; 11095 11096 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb; 11097 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb; 11098 11099 /* Tiling and DCC changes also require bandwidth updates. 
*/ 11100 if (old_afb->tiling_flags != new_afb->tiling_flags || 11101 old_afb->base.modifier != new_afb->base.modifier) 11102 return true; 11103 } 11104 11105 return false; 11106 } 11107 11108 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, 11109 struct drm_plane_state *new_plane_state, 11110 struct drm_framebuffer *fb) 11111 { 11112 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev); 11113 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); 11114 unsigned int pitch; 11115 bool linear; 11116 11117 if (fb->width > new_acrtc->max_cursor_width || 11118 fb->height > new_acrtc->max_cursor_height) { 11119 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n", 11120 new_plane_state->fb->width, 11121 new_plane_state->fb->height); 11122 return -EINVAL; 11123 } 11124 if (new_plane_state->src_w != fb->width << 16 || 11125 new_plane_state->src_h != fb->height << 16) { 11126 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 11127 return -EINVAL; 11128 } 11129 11130 /* Pitch in pixels */ 11131 pitch = fb->pitches[0] / fb->format->cpp[0]; 11132 11133 if (fb->width != pitch) { 11134 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d", 11135 fb->width, pitch); 11136 return -EINVAL; 11137 } 11138 11139 switch (pitch) { 11140 case 64: 11141 case 128: 11142 case 256: 11143 /* FB pitch is supported by cursor plane */ 11144 break; 11145 default: 11146 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch); 11147 return -EINVAL; 11148 } 11149 11150 /* Core DRM takes care of checking FB modifiers, so we only need to 11151 * check tiling flags when the FB doesn't have a modifier. 11152 */ 11153 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { 11154 if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) { 11155 linear = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE) == 0; 11156 } else if (adev->family >= AMDGPU_FAMILY_AI) { 11157 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; 11158 } else { 11159 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && 11160 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && 11161 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; 11162 } 11163 if (!linear) { 11164 DRM_DEBUG_ATOMIC("Cursor FB not linear"); 11165 return -EINVAL; 11166 } 11167 } 11168 11169 return 0; 11170 } 11171 11172 /* 11173 * Helper function for checking the cursor in native mode 11174 */ 11175 static int dm_check_native_cursor_state(struct drm_crtc *new_plane_crtc, 11176 struct drm_plane *plane, 11177 struct drm_plane_state *new_plane_state, 11178 bool enable) 11179 { 11180 11181 struct amdgpu_crtc *new_acrtc; 11182 int ret; 11183 11184 if (!enable || !new_plane_crtc || 11185 drm_atomic_plane_disabling(plane->state, new_plane_state)) 11186 return 0; 11187 11188 new_acrtc = to_amdgpu_crtc(new_plane_crtc); 11189 11190 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { 11191 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 11192 return -EINVAL; 11193 } 11194 11195 if (new_plane_state->fb) { 11196 ret = dm_check_cursor_fb(new_acrtc, new_plane_state, 11197 new_plane_state->fb); 11198 if (ret) 11199 return ret; 11200 } 11201 11202 return 0; 11203 } 11204 11205 static bool dm_should_update_native_cursor(struct drm_atomic_state *state, 11206 struct drm_crtc *old_plane_crtc, 11207 struct drm_crtc *new_plane_crtc, 11208 bool enable) 11209 { 11210 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 11211 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 
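
	/*
	 * The decision is made against the cursor_mode of the CRTC the plane
	 * is (or was) attached to: the old CRTC's mode when disabling, the
	 * new CRTC's mode when enabling. A plane with no CRTC in either
	 * state has nothing to hand to DC, so it defaults to the native
	 * (no-op) path.
	 */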
11212 11213 if (!enable) { 11214 if (old_plane_crtc == NULL) 11215 return true; 11216 11217 old_crtc_state = drm_atomic_get_old_crtc_state( 11218 state, old_plane_crtc); 11219 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 11220 11221 return dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; 11222 } else { 11223 if (new_plane_crtc == NULL) 11224 return true; 11225 11226 new_crtc_state = drm_atomic_get_new_crtc_state( 11227 state, new_plane_crtc); 11228 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11229 11230 return dm_new_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; 11231 } 11232 } 11233 11234 static int dm_update_plane_state(struct dc *dc, 11235 struct drm_atomic_state *state, 11236 struct drm_plane *plane, 11237 struct drm_plane_state *old_plane_state, 11238 struct drm_plane_state *new_plane_state, 11239 bool enable, 11240 bool *lock_and_validation_needed, 11241 bool *is_top_most_overlay) 11242 { 11243 11244 struct dm_atomic_state *dm_state = NULL; 11245 struct drm_crtc *new_plane_crtc, *old_plane_crtc; 11246 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 11247 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; 11248 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; 11249 bool needs_reset, update_native_cursor; 11250 int ret = 0; 11251 11252 11253 new_plane_crtc = new_plane_state->crtc; 11254 old_plane_crtc = old_plane_state->crtc; 11255 dm_new_plane_state = to_dm_plane_state(new_plane_state); 11256 dm_old_plane_state = to_dm_plane_state(old_plane_state); 11257 11258 update_native_cursor = dm_should_update_native_cursor(state, 11259 old_plane_crtc, 11260 new_plane_crtc, 11261 enable); 11262 11263 if (plane->type == DRM_PLANE_TYPE_CURSOR && update_native_cursor) { 11264 ret = dm_check_native_cursor_state(new_plane_crtc, plane, 11265 new_plane_state, enable); 11266 if (ret) 11267 return ret; 11268 11269 return 0; 11270 } 11271 11272 needs_reset = should_reset_plane(state, plane, old_plane_state, 11273 new_plane_state); 11274 11275 /* Remove any changed/removed planes */ 11276 if (!enable) { 11277 if (!needs_reset) 11278 return 0; 11279 11280 if (!old_plane_crtc) 11281 return 0; 11282 11283 old_crtc_state = drm_atomic_get_old_crtc_state( 11284 state, old_plane_crtc); 11285 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 11286 11287 if (!dm_old_crtc_state->stream) 11288 return 0; 11289 11290 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", 11291 plane->base.id, old_plane_crtc->base.id); 11292 11293 ret = dm_atomic_get_state(state, &dm_state); 11294 if (ret) 11295 return ret; 11296 11297 if (!dc_state_remove_plane( 11298 dc, 11299 dm_old_crtc_state->stream, 11300 dm_old_plane_state->dc_state, 11301 dm_state->context)) { 11302 11303 return -EINVAL; 11304 } 11305 11306 if (dm_old_plane_state->dc_state) 11307 dc_plane_state_release(dm_old_plane_state->dc_state); 11308 11309 dm_new_plane_state->dc_state = NULL; 11310 11311 *lock_and_validation_needed = true; 11312 11313 } else { /* Add new planes */ 11314 struct dc_plane_state *dc_new_plane_state; 11315 11316 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 11317 return 0; 11318 11319 if (!new_plane_crtc) 11320 return 0; 11321 11322 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); 11323 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11324 11325 if (!dm_new_crtc_state->stream) 11326 return 0; 11327 11328 if (!needs_reset) 11329 return 0; 11330 11331 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); 11332 
if (ret) 11333 goto out; 11334 11335 WARN_ON(dm_new_plane_state->dc_state); 11336 11337 dc_new_plane_state = dc_create_plane_state(dc); 11338 if (!dc_new_plane_state) { 11339 ret = -ENOMEM; 11340 goto out; 11341 } 11342 11343 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", 11344 plane->base.id, new_plane_crtc->base.id); 11345 11346 ret = fill_dc_plane_attributes( 11347 drm_to_adev(new_plane_crtc->dev), 11348 dc_new_plane_state, 11349 new_plane_state, 11350 new_crtc_state); 11351 if (ret) { 11352 dc_plane_state_release(dc_new_plane_state); 11353 goto out; 11354 } 11355 11356 ret = dm_atomic_get_state(state, &dm_state); 11357 if (ret) { 11358 dc_plane_state_release(dc_new_plane_state); 11359 goto out; 11360 } 11361 11362 /* 11363 * Any atomic check errors that occur after this will 11364 * not need a release. The plane state will be attached 11365 * to the stream, and therefore part of the atomic 11366 * state. It'll be released when the atomic state is 11367 * cleaned. 11368 */ 11369 if (!dc_state_add_plane( 11370 dc, 11371 dm_new_crtc_state->stream, 11372 dc_new_plane_state, 11373 dm_state->context)) { 11374 11375 dc_plane_state_release(dc_new_plane_state); 11376 ret = -EINVAL; 11377 goto out; 11378 } 11379 11380 dm_new_plane_state->dc_state = dc_new_plane_state; 11381 11382 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY); 11383 11384 /* Tell DC to do a full surface update every time there 11385 * is a plane change. Inefficient, but works for now. 11386 */ 11387 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; 11388 11389 *lock_and_validation_needed = true; 11390 } 11391 11392 out: 11393 /* If enabling cursor overlay failed, attempt fallback to native mode */ 11394 if (enable && ret == -EINVAL && plane->type == DRM_PLANE_TYPE_CURSOR) { 11395 ret = dm_check_native_cursor_state(new_plane_crtc, plane, 11396 new_plane_state, enable); 11397 if (ret) 11398 return ret; 11399 11400 dm_new_crtc_state->cursor_mode = DM_CURSOR_NATIVE_MODE; 11401 } 11402 11403 return ret; 11404 } 11405 11406 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, 11407 int *src_w, int *src_h) 11408 { 11409 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 11410 case DRM_MODE_ROTATE_90: 11411 case DRM_MODE_ROTATE_270: 11412 *src_w = plane_state->src_h >> 16; 11413 *src_h = plane_state->src_w >> 16; 11414 break; 11415 case DRM_MODE_ROTATE_0: 11416 case DRM_MODE_ROTATE_180: 11417 default: 11418 *src_w = plane_state->src_w >> 16; 11419 *src_h = plane_state->src_h >> 16; 11420 break; 11421 } 11422 } 11423 11424 static void 11425 dm_get_plane_scale(struct drm_plane_state *plane_state, 11426 int *out_plane_scale_w, int *out_plane_scale_h) 11427 { 11428 int plane_src_w, plane_src_h; 11429 11430 dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h); 11431 *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0; 11432 *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0; 11433 } 11434 11435 /* 11436 * The normalized_zpos value cannot be used by this iterator directly. It's only 11437 * calculated for enabled planes, potentially causing normalized_zpos collisions 11438 * between enabled/disabled planes in the atomic state. We need a unique value 11439 * so that the iterator will not generate the same object twice, or loop 11440 * indefinitely. 
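 *
 * For example, a plane that is being disabled keeps normalized_zpos == 0 and
 * can collide with an enabled plane at normalized_zpos 0, while the
 * (zpos, object ID) pair used here is unique for every plane in the state.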
 */
static inline struct __drm_planes_state *__get_next_zpos(
	struct drm_atomic_state *state,
	struct __drm_planes_state *prev)
{
	unsigned int highest_zpos = 0, prev_zpos = 256;
	uint32_t highest_id = 0, prev_id = UINT_MAX;
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	int i, highest_i = -1;

	if (prev != NULL) {
		prev_zpos = prev->new_state->zpos;
		prev_id = prev->ptr->base.id;
	}

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		/* Skip planes with higher zpos than the previously returned */
		if (new_plane_state->zpos > prev_zpos ||
		    (new_plane_state->zpos == prev_zpos &&
		     plane->base.id >= prev_id))
			continue;

		/* Save the index of the plane with highest zpos */
		if (new_plane_state->zpos > highest_zpos ||
		    (new_plane_state->zpos == highest_zpos &&
		     plane->base.id > highest_id)) {
			highest_zpos = new_plane_state->zpos;
			highest_id = plane->base.id;
			highest_i = i;
		}
	}

	if (highest_i < 0)
		return NULL;

	return &state->planes[highest_i];
}

/*
 * Use the uniqueness of the plane's (zpos, drm obj ID) combination to iterate
 * by descending zpos, as read from the new plane state. This is the same
 * ordering as defined by drm_atomic_normalize_zpos().
 */
#define for_each_oldnew_plane_in_descending_zpos(__state, plane, old_plane_state, new_plane_state) \
	for (struct __drm_planes_state *__i = __get_next_zpos((__state), NULL); \
	     __i != NULL; __i = __get_next_zpos((__state), __i)) \
		for_each_if(((plane) = __i->ptr, \
			     (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
			     (old_plane_state) = __i->old_state, \
			     (new_plane_state) = __i->new_state, 1))

static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state, *old_conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
		if (!conn_state->crtc)
			conn_state = old_conn_state;

		if (conn_state->crtc != crtc)
			continue;

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->mst_output_port || !aconnector->mst_root)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
}

/**
 * DOC: Cursor Modes - Native vs Overlay
 *
 * In native mode, the cursor uses an integrated cursor pipe within each DCN hw
 * plane. It does not require a dedicated hw plane to enable, but it is
 * subject to the same z-order and scaling as the hw plane. It also has format
 * restrictions: an RGB cursor in native mode cannot be enabled within a
 * non-RGB hw plane.
 *
 * In overlay mode, the cursor uses a separate DCN hw plane, and thus has its
 * own scaling and z-pos. It also has no blending restrictions. This makes for
 * cursor behavior more akin to a DRM client's expectations.
 * However, it does occupy an extra DCN plane, and therefore will only be used
 * if a DCN plane is available.
 */

/**
 * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc
 * @adev: amdgpu device
 * @state: DRM atomic state
 * @dm_crtc_state: amdgpu state for the CRTC containing the cursor
 * @cursor_mode: Returns the required cursor mode on dm_crtc_state
 *
 * Get whether the cursor should be enabled in native mode, or overlay mode, on
 * the dm_crtc_state.
 *
 * The cursor should be enabled in overlay mode if there exists an underlying
 * plane - on which the cursor may be blended - that is either YUV formatted, or
 * scaled differently from the cursor.
 *
 * Since zpos info is required, drm_atomic_normalize_zpos must be called before
 * calling this function.
 *
 * Return: 0 on success, or an error code if getting the cursor plane state
 * failed.
 */
static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
				   struct drm_atomic_state *state,
				   struct dm_crtc_state *dm_crtc_state,
				   enum amdgpu_dm_cursor_mode *cursor_mode)
{
	struct drm_plane_state *old_plane_state, *plane_state, *cursor_state;
	struct drm_crtc_state *crtc_state = &dm_crtc_state->base;
	struct drm_plane *plane;
	bool consider_mode_change = false;
	bool entire_crtc_covered = false;
	bool cursor_changed = false;
	int underlying_scale_w, underlying_scale_h;
	int cursor_scale_w, cursor_scale_h;
	int i;

	/*
	 * Overlay cursor is not supported on HW before DCN. DCN401 does not
	 * have the cursor-on-scaled-plane or cursor-on-yuv-plane restrictions
	 * that previous DCN generations have, so enable native mode on DCN401
	 * in addition to DCE.
	 */
	if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0 ||
	    amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
		*cursor_mode = DM_CURSOR_NATIVE_MODE;
		return 0;
	}

	/* Init cursor_mode to be the same as current */
	*cursor_mode = dm_crtc_state->cursor_mode;

	/*
	 * Cursor mode can change if a plane's format changes, scale changes, is
	 * enabled/disabled, or z-order changes.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) {
		int new_scale_w, new_scale_h, old_scale_w, old_scale_h;

		/* Only care about planes on this CRTC */
		if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0)
			continue;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_changed = true;

		if (drm_atomic_plane_enabling(old_plane_state, plane_state) ||
		    drm_atomic_plane_disabling(old_plane_state, plane_state) ||
		    old_plane_state->fb->format != plane_state->fb->format) {
			consider_mode_change = true;
			break;
		}

		dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h);
		dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
		if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
			consider_mode_change = true;
			break;
		}
	}

	if (!consider_mode_change && !crtc_state->zpos_changed)
		return 0;

	/*
	 * If there is no cursor change on this CRTC, and the cursor is not
	 * enabled on this CRTC, then there is no need to set the cursor mode.
	 * This avoids needlessly locking the cursor state.
	 */
	if (!cursor_changed &&
	    !(drm_plane_mask(crtc_state->crtc->cursor) & crtc_state->plane_mask)) {
		return 0;
	}

	cursor_state = drm_atomic_get_plane_state(state,
						  crtc_state->crtc->cursor);
	if (IS_ERR(cursor_state))
		return PTR_ERR(cursor_state);

	/* Cursor is disabled */
	if (!cursor_state->fb)
		return 0;

	/*
	 * For all planes in descending z-order (all of which are below the
	 * cursor as per zpos definitions), check their scaling and format
	 */
	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, plane_state) {

		/* Only care about non-cursor planes on this CRTC */
		if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0 ||
		    plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* Underlying plane is YUV format - use overlay cursor */
		if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) {
			*cursor_mode = DM_CURSOR_OVERLAY_MODE;
			return 0;
		}

		dm_get_plane_scale(plane_state,
				   &underlying_scale_w, &underlying_scale_h);
		dm_get_plane_scale(cursor_state,
				   &cursor_scale_w, &cursor_scale_h);

		/* Underlying plane has different scale - use overlay cursor */
		if (cursor_scale_w != underlying_scale_w &&
		    cursor_scale_h != underlying_scale_h) {
			*cursor_mode = DM_CURSOR_OVERLAY_MODE;
			return 0;
		}

		/* If this plane covers the whole CRTC, no need to check planes underneath */
		if (plane_state->crtc_x <= 0 && plane_state->crtc_y <= 0 &&
		    plane_state->crtc_x + plane_state->crtc_w >= crtc_state->mode.hdisplay &&
		    plane_state->crtc_y + plane_state->crtc_h >= crtc_state->mode.vdisplay) {
			entire_crtc_covered = true;
			break;
		}
	}

	/*
	 * If the planes do not cover the entire CRTC, use overlay mode so the
	 * cursor can still be drawn over the uncovered holes
	 */
	if (entire_crtc_covered)
		*cursor_mode = DM_CURSOR_NATIVE_MODE;
	else
		*cursor_mode = DM_CURSOR_OVERLAY_MODE;

	return 0;
}

static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
					    struct drm_atomic_state *state,
					    struct drm_crtc_state *crtc_state)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state, *old_plane_state;

	drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
		new_plane_state = drm_atomic_get_plane_state(state, plane);
		/*
		 * Calling drm_atomic_get_plane_state() a second time would
		 * hand back the new state again; fetch the old state
		 * explicitly so the mem_type comparison below actually
		 * compares old against new.
		 */
		old_plane_state = drm_atomic_get_old_plane_state(state, plane);

		if (IS_ERR(new_plane_state) || !old_plane_state) {
			drm_err(dev, "Failed to get plane state for plane %s\n", plane->name);
			return false;
		}

		if (old_plane_state->fb && new_plane_state->fb &&
		    get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
			return true;
	}

	return false;
}

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 *
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For the full-update case, which removes, adds, or updates streams
 * on one CRTC while flipping on another CRTC, acquiring the global lock
 * guarantees that any such full-update commit will wait for completion of any
 * outstanding flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, or a negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state, *new_cursor_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;
	bool is_top_most_overlay = true;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0};

	trace_amdgpu_dm_atomic_check_begin(state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret) {
		drm_dbg_atomic(dev, "drm_atomic_helper_check_modeset() failed\n");
		goto fail;
	}

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of a modeset already.
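		 * For the remaining connectors, a modeset is forced on the
		 * attached CRTC below whenever the ABM level or the
		 * underscan/scaling settings changed, since both live on the
		 * connector state.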
*/ 11770 if (!new_con_state->crtc) 11771 continue; 11772 11773 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); 11774 if (IS_ERR(new_crtc_state)) { 11775 drm_dbg_atomic(dev, "drm_atomic_get_crtc_state() failed\n"); 11776 ret = PTR_ERR(new_crtc_state); 11777 goto fail; 11778 } 11779 11780 if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || 11781 dm_old_con_state->scaling != dm_new_con_state->scaling) 11782 new_crtc_state->connectors_changed = true; 11783 } 11784 11785 if (dc_resource_is_dsc_encoding_supported(dc)) { 11786 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11787 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { 11788 ret = add_affected_mst_dsc_crtcs(state, crtc); 11789 if (ret) { 11790 drm_dbg_atomic(dev, "add_affected_mst_dsc_crtcs() failed\n"); 11791 goto fail; 11792 } 11793 } 11794 } 11795 } 11796 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11797 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 11798 11799 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 11800 !new_crtc_state->color_mgmt_changed && 11801 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled && 11802 dm_old_crtc_state->dsc_force_changed == false) 11803 continue; 11804 11805 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); 11806 if (ret) { 11807 drm_dbg_atomic(dev, "amdgpu_dm_verify_lut_sizes() failed\n"); 11808 goto fail; 11809 } 11810 11811 if (!new_crtc_state->enable) 11812 continue; 11813 11814 ret = drm_atomic_add_affected_connectors(state, crtc); 11815 if (ret) { 11816 drm_dbg_atomic(dev, "drm_atomic_add_affected_connectors() failed\n"); 11817 goto fail; 11818 } 11819 11820 ret = drm_atomic_add_affected_planes(state, crtc); 11821 if (ret) { 11822 drm_dbg_atomic(dev, "drm_atomic_add_affected_planes() failed\n"); 11823 goto fail; 11824 } 11825 11826 if (dm_old_crtc_state->dsc_force_changed) 11827 new_crtc_state->mode_changed = true; 11828 } 11829 11830 /* 11831 * Add all primary and overlay planes on the CRTC to the state 11832 * whenever a plane is enabled to maintain correct z-ordering 11833 * and to enable fast surface updates. 11834 */ 11835 drm_for_each_crtc(crtc, dev) { 11836 bool modified = false; 11837 11838 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 11839 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11840 continue; 11841 11842 if (new_plane_state->crtc == crtc || 11843 old_plane_state->crtc == crtc) { 11844 modified = true; 11845 break; 11846 } 11847 } 11848 11849 if (!modified) 11850 continue; 11851 11852 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 11853 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11854 continue; 11855 11856 new_plane_state = 11857 drm_atomic_get_plane_state(state, plane); 11858 11859 if (IS_ERR(new_plane_state)) { 11860 ret = PTR_ERR(new_plane_state); 11861 drm_dbg_atomic(dev, "new_plane_state is BAD\n"); 11862 goto fail; 11863 } 11864 } 11865 } 11866 11867 /* 11868 * DC consults the zpos (layer_index in DC terminology) to determine the 11869 * hw plane on which to enable the hw cursor (see 11870 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in 11871 * atomic state, so call drm helper to normalize zpos. 11872 */ 11873 ret = drm_atomic_normalize_zpos(dev, state); 11874 if (ret) { 11875 drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n"); 11876 goto fail; 11877 } 11878 11879 /* 11880 * Determine whether cursors on each CRTC should be enabled in native or 11881 * overlay mode. 
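	 * Overlay mode is chosen when some underlying plane is YUV formatted
	 * or scaled differently from the cursor; see the DOC comment "Cursor
	 * Modes - Native vs Overlay" above for the tradeoffs between the two.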
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
					      &dm_new_crtc_state->cursor_mode);
		if (ret) {
			drm_dbg(dev, "Failed to determine cursor mode\n");
			goto fail;
		}

		/*
		 * If overlay cursor is needed, DC cannot go through the
		 * native cursor update path. All enabled planes on the CRTC
		 * need to be added for DC to not disable a plane by mistake
		 */
		if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				goto fail;
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed,
					    &is_top_most_overlay);
		if (ret) {
			drm_dbg_atomic(dev, "dm_update_plane_state() failed\n");
			goto fail;
		}
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret) {
			drm_dbg_atomic(dev, "DISABLE: dm_update_crtc_state() failed\n");
			goto fail;
		}
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret) {
			drm_dbg_atomic(dev, "ENABLE: dm_update_crtc_state() failed\n");
			goto fail;
		}
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed,
					    &is_top_most_overlay);
		if (ret) {
			drm_dbg_atomic(dev, "dm_update_plane_state() failed\n");
			goto fail;
		}
	}

#if defined(CONFIG_DRM_AMD_DC_FP)
	if (dc_resource_is_dsc_encoding_supported(dc)) {
		ret = pre_validate_dsc(state, &dm_state, vars);
		if (ret != 0)
			goto fail;
	}
#endif

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret) {
		drm_dbg_atomic(dev, "drm_atomic_helper_check_planes() failed\n");
		goto fail;
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->mpo_requested)
			drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc);
	}

	/* Check cursor restrictions */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum amdgpu_dm_cursor_mode required_cursor_mode;
		int is_rotated, is_scaled;

		/* Overlay cursor is not subject to native cursor restrictions */
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
			continue;

		/* Check if rotation or scaling is enabled on DCN401 */
		if ((drm_plane_mask(crtc->cursor) & new_crtc_state->plane_mask) &&
		    amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
			new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);

			is_rotated = new_cursor_state &&
				((new_cursor_state->rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0);
			is_scaled = new_cursor_state && ((new_cursor_state->src_w >> 16 != new_cursor_state->crtc_w) ||
							 (new_cursor_state->src_h >> 16 != new_cursor_state->crtc_h));

			if (is_rotated || is_scaled) {
				drm_dbg_driver(
					crtc->dev,
					"[CRTC:%d:%s] cannot enable hardware cursor due to rotation/scaling\n",
					crtc->base.id, crtc->name);
				ret = -EINVAL;
				goto fail;
			}
		}

		/* If HW can only do native cursor, check restrictions again */
		ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
					      &required_cursor_mode);
		if (ret) {
			drm_dbg_driver(crtc->dev,
				       "[CRTC:%d:%s] Checking cursor mode failed\n",
				       crtc->base.id, crtc->name);
			goto fail;
		} else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) {
			drm_dbg_driver(crtc->dev,
				       "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n",
				       crtc->base.id, crtc->name);
			ret = -EINVAL;
			goto fail;
		}
	}

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper; check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-change validation was removed due to the inability to
	 * commit a new stream into the context without causing a full reset.
	 * Need to decide how to handle.
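	 * Until then, any scaling or underscan change on a connector simply
	 * forces the full lock-and-validation path below.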

	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into context w/o causing full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/* Set the slot info for each mst_state based on the link encoding format */
	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		struct amdgpu_dm_connector *aconnector;
		struct drm_connector *connector;
		struct drm_connector_list_iter iter;
		u8 link_coding_cap;

		drm_connector_list_iter_begin(dev, &iter);
		drm_for_each_connector_iter(connector, &iter) {
			if (connector->index == mst_state->mgr->conn_base_id) {
				aconnector = to_amdgpu_dm_connector(connector);
				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
				drm_dp_mst_update_slots(mst_state, link_coding_cap);

				break;
			}
		}
		drm_connector_list_iter_end(&iter);
	}

	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We currently have to stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			drm_dbg_atomic(dev, "dm_atomic_get_state() failed\n");
			goto fail;
		}

		ret = do_aquire_global_lock(dev, state);
		if (ret) {
			drm_dbg_atomic(dev, "do_aquire_global_lock() failed\n");
			goto fail;
		}

#if defined(CONFIG_DRM_AMD_DC_FP)
		if (dc_resource_is_dsc_encoding_supported(dc)) {
			ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
			if (ret) {
				drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n");
				ret = -EINVAL;
				goto fail;
			}
		}
#endif

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
		if (ret) {
			drm_dbg_atomic(dev, "dm_update_mst_vcpi_slots_for_dsc() failed\n");
			goto fail;
		}
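
		/*
		 * At this point the heavy-weight path has, in order, computed
		 * MST DSC configs for the whole topology
		 * (compute_mst_dsc_configs_for_state) and translated them into
		 * VCPI slot allocations (dm_update_mst_vcpi_slots_for_dsc).
		 * The MST atomic check below must still run before DC global
		 * validation.
		 */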

		/*
		 * Perform validation of the MST topology in the state:
		 * we need to perform the MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * of getting stuck in an infinite loop and hanging eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret) {
			drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n");
			goto fail;
		}
		status = dc_validate_global_state(dc, dm_state->context, true);
		if (status != DC_OK) {
			drm_dbg_atomic(dev, "DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		/*
		 * Only allow async flips for fast updates that don't change
		 * the FB pitch, the DCC state, rotation, mem_type, etc.
		 */
		if (new_crtc_state->async_flip &&
		    (lock_and_validation_needed ||
		     amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) {
			drm_dbg_atomic(crtc->dev,
				       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
				       crtc->base.id, crtc->name);
			ret = -EINVAL;
			goto fail;
		}

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
			UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
	}
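
	/*
	 * UPDATE_TYPE_FULL vs UPDATE_TYPE_FAST mirrors the
	 * lock_and_validation_needed decision above: full updates are the
	 * ones that went through DC global validation (and took the global
	 * lock), while fast updates left the DC context untouched, so later
	 * stages can tell the two apart per CRTC.
	 */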

	/* Must be a success at this point */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		drm_dbg_atomic(dev, "Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		drm_dbg_atomic(dev, "Atomic check stopped due to signal.\n");
	else
		drm_dbg_atomic(dev, "Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
				    unsigned int offset,
				    unsigned int total_length,
				    u8 *data,
				    unsigned int length,
				    struct amdgpu_hdmi_vsdb_info *vsdb)
{
	bool res;
	union dmub_rb_cmd cmd;
	struct dmub_cmd_send_edid_cea *input;
	struct dmub_cmd_edid_cea_output *output;

	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	input = &cmd.edid_cea.data.input;

	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
	cmd.edid_cea.header.sub_type = 0;
	cmd.edid_cea.header.payload_bytes =
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
	input->cea_total_length = total_length;
	memcpy(input->payload, data, length);

	res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
	if (!res) {
		drm_err(adev_to_drm(dm->adev), "EDID CEA parser failed\n");
		return false;
	}

	output = &cmd.edid_cea.data.output;

	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
		if (!output->ack.success) {
			drm_err(adev_to_drm(dm->adev), "EDID CEA ack failed at offset %d\n",
				output->ack.offset);
		}
	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
		if (!output->amd_vsdb.vsdb_found)
			return false;

		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
	} else {
		drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n");
		return false;
	}

	return true;
}

static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
				u8 *edid_ext, int len,
				struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* Send the extension block to the DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* Send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* EDID block send completed, expect result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
			if (res) {
				/* AMD VSDB found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* Not an AMD VSDB */
			return false;
		}

		/* Check for ack */
		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
		if (!res)
			return false;
	}

	return false;
}
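
/*
 * Both parsers above and below stream the 128-byte CEA extension block to
 * firmware in 8-byte chunks. As a worked example, a full extension block
 * takes 16 sends; on the DMCU path every chunk except the last is answered
 * with an ack, and only the final chunk (i + 8 == len) yields the parsed
 * AMD VSDB result (version plus min/max refresh rate).
 */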

static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
				u8 *edid_ext, int len,
				struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* Send the extension block to the DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* Send 8 bytes at a time */
		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
			return false;
	}

	return vsdb_info->freesync_supported;
}

static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
			   u8 *edid_ext, int len,
			   struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	bool ret;

	mutex_lock(&adev->dm.dc_lock);
	if (adev->dm.dmub_srv)
		ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
	else
		ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
	mutex_unlock(&adev->dm.dc_lock);
	return ret;
}

static void parse_edid_displayid_vrr(struct drm_connector *connector,
				     const struct edid *edid)
{
	u8 *edid_ext = NULL;
	int i;
	int j = 0;
	u16 min_vfreq;
	u16 max_vfreq;

	if (edid == NULL || edid->extensions == 0)
		return;

	/* Find the DisplayID extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (void *)(edid + (i + 1));
		if (edid_ext[0] == DISPLAYID_EXT)
			break;
	}

	if (edid_ext == NULL)
		return;

	while (j < EDID_LENGTH) {
		/* Get the dynamic video timing range from DisplayID if available */
		if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 &&
		    (edid_ext[j + 1] & 0xFE) == 0 && (edid_ext[j + 2] == 9)) {
			min_vfreq = edid_ext[j + 9];
			if (edid_ext[j + 1] & 7)
				max_vfreq = edid_ext[j + 10] + ((edid_ext[j + 11] & 3) << 8);
			else
				max_vfreq = edid_ext[j + 10];

			if (max_vfreq && min_vfreq) {
				connector->display_info.monitor_range.max_vfreq = max_vfreq;
				connector->display_info.monitor_range.min_vfreq = min_vfreq;

				return;
			}
		}
		j++;
	}
}

static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
			  const struct edid *edid,
			  struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	u8 *edid_ext = NULL;
	int i;
	int j = 0;

	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find the DisplayID extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (void *)(edid + (i + 1));
		if (edid_ext[0] == DISPLAYID_EXT)
			break;
	}
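
	/*
	 * The loop below reassembles the 24-bit IEEE OUI from the three
	 * little-endian bytes of each candidate block; e.g. a block storing
	 * ieee_id[] = { 0x1A, 0x00, 0x00 } yields
	 * (0x00 << 16) | (0x00 << 8) | 0x1A == 0x00001A. The result is then
	 * compared against AMD's registered OUI and the version-3 VSDB tag.
	 */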

	while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
		struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
		unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) |
				      (amd_vsdb->ieee_id[1] << 8) |
				      (amd_vsdb->ieee_id[0]);

		if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
		    amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
			vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ?
				true : false;
			vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
			DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);

			return true;
		}
		j++;
	}

	return false;
}

static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
			       const struct edid *edid,
			       struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	u8 *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or no EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find the CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}

/**
 * amdgpu_dm_update_freesync_caps - Update FreeSync capabilities
 *
 * @connector: Connector to query.
 * @drm_edid: DRM EDID from monitor
 *
 * Amdgpu supports FreeSync on DP and HDMI displays, and some of the display
 * information has to be tracked in the internal data structures used by
 * amdgpu_dm. This function checks the connector type and sets the FreeSync
 * parameters accordingly.
 */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    const struct drm_edid *drm_edid)
{
	int i = 0;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
	const struct edid *edid;
	bool freesync_capable = false;
	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;

	if (!connector->state) {
		drm_err(adev_to_drm(adev), "%s - Connector has no state", __func__);
		goto update;
	}

	sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;
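
	/*
	 * With no physical sink attached, the ternary above falls back to the
	 * emulated sink (dc_em_sink) so the capability parsing below still
	 * has a signal type to classify by.
	 */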

	drm_edid_connector_update(connector, drm_edid);

	if (!drm_edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

	edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()

	/* Some eDP panels only have the refresh rate range info in DisplayID */
	if ((connector->display_info.monitor_range.min_vfreq == 0 ||
	     connector->display_info.monitor_range.max_vfreq == 0))
		parse_edid_displayid_vrr(connector, edid);

	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
		if (amdgpu_dm_connector->dc_link &&
		    amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) {
			amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
			amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
		}

		parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);

		if (vsdb_info.replay_mode) {
			amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
			amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
			amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
		}

	} else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

	if (amdgpu_dm_connector->dc_link)
		as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);

	if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
			amdgpu_dm_connector->pack_sdp_v1_3 = true;
			amdgpu_dm_connector->as_type = as_type;
			amdgpu_dm_connector->vsdb_info = vsdb_info;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->state && amdgpu_dm_connector->dc_link && !freesync_capable &&
	    amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported) {
		amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported = false;
		amdgpu_dm_connector->dc_link->replay_settings.replay_feature_enabled = false;
	}
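
	/*
	 * Everything above reduces to a single freesync_capable bit: a range
	 * parsed from EDID/DisplayID/VSDB only counts if it leaves a usable
	 * VRR window (max_vfreq - min_vfreq > 10 Hz; e.g. 48-144 Hz qualifies
	 * while 60-60 Hz does not), and that bit is what gets published to
	 * userspace via the vrr_capable property below.
	 */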

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
{
	if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
		dc_exit_ips_for_hw_access(dc);
}

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       u32 value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		drm_err(adev_to_drm(ctx->driver_context),
			"invalid register write; address = 0\n");
		return;
	}
#endif

	amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	u32 value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		drm_err(adev_to_drm(ctx->driver_context),
			"invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	amdgpu_dm_exit_ips_for_hw_access(ctx->dc);

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

int amdgpu_dm_process_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;
	int ret = -1;

	mutex_lock(&adev->dm.dpia_aux_lock);
	if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
		*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		goto out;
	}

	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		goto out;
	}
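
	/*
	 * The flow here is fire-and-wait: dc_process_dmub_aux_transfer_async()
	 * only queues the request with the DMUB, and the completion
	 * (dmub_aux_transfer_done) is signaled from the notification path once
	 * the firmware replies, with the 10-second timeout above as a
	 * backstop. The reply itself is then picked out of dm->dmub_notify
	 * below.
	 */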

	if (p_notify->result != AUX_RET_SUCCESS) {
		/*
		 * Transient states before tunneling is enabled could
		 * lead to this error. We can ignore this for now.
		 */
		if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
			drm_warn(adev_to_drm(adev), "DPIA AUX failed on 0x%x(%d), error %d\n",
				 payload->address, payload->length,
				 p_notify->result);
		}
		*operation_result = p_notify->result;
		goto out;
	}

	payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF;
	if (adev->dm.dmub_notify->aux_reply.command & 0xF0)
		/* The reply is stored in the top nibble of the command. */
		payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;

	/* A write request may also receive a byte indicating how many bytes were partially written. */
	if (p_notify->aux_reply.length)
		memcpy(payload->data, p_notify->aux_reply.data,
		       p_notify->aux_reply.length);

	/* success */
	ret = p_notify->aux_reply.length;
	*operation_result = p_notify->result;
out:
	reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}

static void abort_fused_io(
		struct dc_context *ctx,
		const struct dmub_cmd_fused_request *request
)
{
	union dmub_rb_cmd command = { 0 };
	struct dmub_rb_cmd_fused_io *io = &command.fused_io;

	io->header.type = DMUB_CMD__FUSED_IO;
	io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT;
	io->header.payload_bytes = sizeof(*io) - sizeof(io->header);
	io->request = *request;
	dm_execute_dmub_cmd(ctx, &command, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

static bool execute_fused_io(
		struct amdgpu_device *dev,
		struct dc_context *ctx,
		union dmub_rb_cmd *commands,
		uint8_t count,
		uint32_t timeout_us
)
{
	const uint8_t ddc_line = commands[0].fused_io.request.u.aux.ddc_line;

	if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io))
		return false;

	struct fused_io_sync *sync = &dev->dm.fused_io[ddc_line];
	struct dmub_rb_cmd_fused_io *first = &commands[0].fused_io;
	const bool result = dm_execute_dmub_cmd_list(ctx, count, commands, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
			    && first->header.ret_status
			    && first->request.status == FUSED_REQUEST_STATUS_SUCCESS;

	if (!result)
		return false;

	while (wait_for_completion_timeout(&sync->replied, usecs_to_jiffies(timeout_us))) {
		reinit_completion(&sync->replied);

		struct dmub_cmd_fused_request *reply = (struct dmub_cmd_fused_request *)sync->reply_data;

		static_assert(sizeof(*reply) <= sizeof(sync->reply_data), "Size mismatch");

		if (reply->identifier == first->request.identifier) {
			first->request = *reply;
			return true;
		}
	}

	reinit_completion(&sync->replied);
	first->request.status = FUSED_REQUEST_STATUS_TIMEOUT;
	abort_fused_io(ctx, &first->request);
	return false;
}

bool amdgpu_dm_execute_fused_io(
		struct amdgpu_device *dev,
		struct dc_link *link,
		union dmub_rb_cmd *commands,
		uint8_t count,
		uint32_t timeout_us)
{
	struct amdgpu_display_manager *dm = &dev->dm;

	mutex_lock(&dm->dpia_aux_lock);

	const bool result = execute_fused_io(dev, link->ctx, commands, count, timeout_us);

	mutex_unlock(&dm->dpia_aux_lock);
	return result;
}
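
/*
 * Summary of the fused I/O handshake implemented above: the command list is
 * submitted synchronously, then replies are matched against the request
 * identifier as they arrive on the per-DDC-line completion. Stale replies
 * carrying other identifiers are simply drained and the wait restarts; if
 * the matching reply never shows up within timeout_us, the request is marked
 * FUSED_REQUEST_STATUS_TIMEOUT and an abort command is sent to the DMUB so
 * the firmware side does not keep the transaction open.
 */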

int amdgpu_dm_process_dmub_set_config_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	bool is_cmd_complete;
	int ret;

	mutex_lock(&adev->dm.dpia_aux_lock);
	is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
			link_index, payload, adev->dm.dmub_notify);

	if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		ret = 0;
		*operation_result = adev->dm.dmub_notify->sc_status;
	} else {
		drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
		ret = -1;
		*operation_result = SET_CONFIG_UNKNOWN_ERROR;
	}

	if (!is_cmd_complete)
		reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}

bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
}

bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
}

void dm_acpi_process_phy_transition_interlock(
	const struct dc_context *ctx,
	struct dm_process_phy_transition_init_params process_phy_transition_init_params)
{
	// Not yet implemented
}