1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright 2012-15 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: AMD 24 * 25 */ 26 27 #include <linux/vmalloc.h> 28 #include <drm/display/drm_dp_helper.h> 29 #include <drm/display/drm_dp_mst_helper.h> 30 #include <drm/drm_atomic.h> 31 #include <drm/drm_atomic_helper.h> 32 #include <drm/drm_fixed.h> 33 #include <drm/drm_edid.h> 34 #include "dm_services.h" 35 #include "amdgpu.h" 36 #include "amdgpu_dm.h" 37 #include "amdgpu_dm_mst_types.h" 38 #include "amdgpu_dm_hdcp.h" 39 40 #include "dc.h" 41 #include "dm_helpers.h" 42 43 #include "ddc_service_types.h" 44 #include "dpcd_defs.h" 45 46 #include "dmub_cmd.h" 47 #if defined(CONFIG_DEBUG_FS) 48 #include "amdgpu_dm_debugfs.h" 49 #endif 50 51 #include "dc/resource/dcn20/dcn20_resource.h" 52 53 #define PEAK_FACTOR_X1000 1006 54 55 /* 56 * This function handles both native AUX and I2C-Over-AUX transactions. 
 */
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	ssize_t result = 0;
	struct aux_payload payload;
	enum aux_return_code_type operation_result;
	struct amdgpu_device *adev;
	struct ddc_service *ddc;
	uint8_t copy[16];

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

	payload.address = msg->address;
	payload.data = msg->buffer;
	payload.length = msg->size;
	payload.reply = &msg->reply;
	payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
	payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
	payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
	payload.write_status_update =
			(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
	payload.defer_delay = 0;

	if (payload.write) {
		memcpy(copy, msg->buffer, msg->size);
		payload.data = copy;
	}

	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
					  &operation_result);

	/*
	 * Workaround for certain Intel platforms where HPD unexpectedly pulls
	 * low during the first sideband message transaction and the transfer
	 * returns AUX_RET_ERROR_HPD_DISCON. The AUX transaction actually
	 * succeeds in that case, so bypass the error.
	 */
	ddc = TO_DM_AUX(aux)->ddc_service;
	adev = ddc->ctx->driver_context;
	if (adev->dm.aux_hpd_discon_quirk) {
		if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
			operation_result == AUX_RET_ERROR_HPD_DISCON) {
			result = msg->size;
			operation_result = AUX_RET_SUCCESS;
		}
	}

	/*
	 * A result of 0 also covers the AUX_DEFER/I2C_DEFER cases.
	 */
	if (payload.write && result >= 0) {
		if (result) {
			/* one byte indicating the number of partially written bytes */
			drm_dbg_dp(adev_to_drm(adev), "AUX partially written\n");
			result = payload.data[0];
		} else if (!payload.reply[0])
			/* I2C_ACK | AUX_ACK */
			result = msg->size;
	}

	if (result < 0) {
		switch (operation_result) {
		case AUX_RET_SUCCESS:
			break;
		case AUX_RET_ERROR_HPD_DISCON:
		case AUX_RET_ERROR_UNKNOWN:
		case AUX_RET_ERROR_INVALID_OPERATION:
		case AUX_RET_ERROR_PROTOCOL_ERROR:
			result = -EIO;
			break;
		case AUX_RET_ERROR_INVALID_REPLY:
		case AUX_RET_ERROR_ENGINE_ACQUIRE:
			result = -EBUSY;
			break;
		case AUX_RET_ERROR_TIMEOUT:
			result = -ETIMEDOUT;
			break;
		}

		drm_dbg_dp(adev_to_drm(adev), "DP AUX transfer fail:%d\n", operation_result);
	}

	if (payload.reply[0])
		drm_dbg_dp(adev_to_drm(adev), "AUX reply command not ACK: 0x%02x.",
			   payload.reply[0]);

	return result;
}

static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(connector);

	if (aconnector->dc_sink) {
		dc_link_remove_remote_sink(aconnector->dc_link,
					   aconnector->dc_sink);
		dc_sink_release(aconnector->dc_sink);
	}

	drm_edid_free(aconnector->drm_edid);

	drm_connector_cleanup(connector);
	drm_dp_mst_put_port_malloc(aconnector->mst_output_port);
	kfree(aconnector);
}

static int
amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	r = drm_dp_mst_connector_late_register(connector,
					       amdgpu_dm_connector->mst_output_port);
	if (r < 0)
		return r;

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

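/*
 * Note (descriptive comment, not from the original source): the helper below
 * clears the MST state cached on the amdgpu_dm_connector (remote EDID, DSC
 * AUX handles and the cached virtual-channel/local link bandwidth figures).
 * It is used once the remote sink goes away, e.g. from early_unregister or
 * from a disconnect seen in dm_dp_mst_detect(), so a later re-plug starts
 * from a clean slate.
 */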
static inline void
amdgpu_dm_mst_reset_mst_connector_setting(struct amdgpu_dm_connector *aconnector)
{
	aconnector->drm_edid = NULL;
	aconnector->dsc_aux = NULL;
	aconnector->mst_output_port->passthrough_aux = NULL;
	aconnector->mst_local_bw = 0;
	aconnector->vc_full_pbn = 0;
}

static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = aconnector->mst_output_port;
	struct amdgpu_dm_connector *root = aconnector->mst_root;
	struct dc_link *dc_link = aconnector->dc_link;
	struct dc_sink *dc_sink = aconnector->dc_sink;

	drm_dp_mst_connector_early_unregister(connector, port);

	/*
	 * Release the dc_sink for a connector whose attached port is
	 * no longer in the MST topology.
	 */
	drm_modeset_lock(&root->mst_mgr.base.lock, NULL);
	if (dc_sink) {
		if (dc_link->sink_count)
			dc_link_remove_remote_sink(dc_link, dc_sink);

		drm_dbg_dp(connector->dev,
			   "DM_MST: remove remote sink 0x%p, %d remaining\n",
			   dc_sink, dc_link->sink_count);

		dc_sink_release(dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
	}

	aconnector->mst_status = MST_STATUS_DEFAULT;
	drm_modeset_unlock(&root->mst_mgr.base.lock);
}

static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dm_dp_mst_connector_destroy,
	.reset = amdgpu_dm_connector_funcs_reset,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_mst_connector_late_register,
	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};

bool needs_dsc_aux_workaround(struct dc_link *link)
{
	if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
	    (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
	    link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
		return true;

	return false;
}

#if defined(CONFIG_DRM_AMD_DC_FP)
static bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
{
	u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F

	if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
		if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
		    IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
			DRM_INFO("Synaptics Cascaded MST hub\n");
			return true;
		}
	}

	return false;
}

static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *dc_sink = aconnector->dc_sink;
	struct drm_dp_mst_port *port = aconnector->mst_output_port;
	u8 dsc_caps[16] = { 0 };
	u8 dsc_branch_dec_caps_raw[3] = { 0 }; // DSC branch decoder caps 0xA0 ~ 0xA2
	u8 *dsc_branch_dec_caps = NULL;

	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);

	/*
	 * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
	 * because it only checks the DSC/FEC caps of the given port and not
	 * of the dock.
	 *
	 * This case returns NULL: a DSC-capable MST dock connected to a
	 * non-FEC/DSC-capable display.
	 *
	 * Workaround: explicitly check for the case above and use the MST
	 * dock's AUX as dsc_aux.
	 */
	if (!aconnector->dsc_aux && !port->parent->port_parent &&
	    needs_dsc_aux_workaround(aconnector->dc_link))
		aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;

	/* synaptics cascaded MST hub case */
	if (is_synaptics_cascaded_panamera(aconnector->dc_link, port))
		aconnector->dsc_aux = port->mgr->aux;

	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux,
			DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, dsc_branch_dec_caps_raw, 3) == 3)
		dsc_branch_dec_caps = dsc_branch_dec_caps_raw;

	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				   dsc_caps, dsc_branch_dec_caps,
				   &dc_sink->dsc_caps.dsc_dec_caps))
		return false;

	return true;
}
#endif

static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector)
{
	union dp_downstream_port_present ds_port_present;

	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) {
		DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n");
		return false;
	}

	aconnector->mst_downstream_port_present = ds_port_present;
	DRM_INFO("Downstream port present %d, type %d\n",
		 ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE);

	return true;
}

static bool retrieve_branch_specific_data(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_dp_mst_port *port = aconnector->mst_output_port;
	struct drm_dp_mst_port *port_parent;
	struct drm_dp_aux *immediate_upstream_aux;
	struct drm_dp_desc branch_desc;

	if (!port->parent)
		return false;

	port_parent = port->parent->port_parent;

	immediate_upstream_aux = port_parent ? &port_parent->aux : port->mgr->aux;

	if (drm_dp_read_desc(immediate_upstream_aux, &branch_desc, true))
		return false;

	aconnector->branch_ieee_oui = (branch_desc.ident.oui[0] << 16) +
				      (branch_desc.ident.oui[1] << 8) +
				      (branch_desc.ident.oui[2]);

	drm_dbg_dp(port->aux.drm_dev, "MST branch oui 0x%x detected at %s\n",
		   aconnector->branch_ieee_oui, connector->name);

	return true;
}

static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	int ret = 0;

	if (!aconnector)
		return drm_add_edid_modes(connector, NULL);

	if (!aconnector->drm_edid) {
		const struct drm_edid *drm_edid;

		drm_edid = drm_dp_mst_edid_read(connector,
						&aconnector->mst_root->mst_mgr,
						aconnector->mst_output_port);

		if (!drm_edid) {
			amdgpu_dm_set_mst_status(&aconnector->mst_status,
						 MST_REMOTE_EDID, false);

			drm_edid_connector_update(
				&aconnector->base,
				NULL);

			DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name);
			if (!aconnector->dc_sink) {
				struct dc_sink *dc_sink;
				struct dc_sink_init_data init_params = {
					.link = aconnector->dc_link,
					.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

				dc_sink = dc_link_add_remote_sink(
					aconnector->dc_link,
					NULL,
					0,
					&init_params);

				if (!dc_sink) {
					DRM_ERROR("Unable to add a remote sink\n");
					return 0;
				}

				drm_dbg_dp(connector->dev,
					   "DM_MST: add remote sink 0x%p, %d remaining\n",
					   dc_sink,
					   aconnector->dc_link->sink_count);

				dc_sink->priv = aconnector;
				aconnector->dc_sink = dc_sink;
			}

			return ret;
		}

		aconnector->drm_edid = drm_edid;
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 MST_REMOTE_EDID, true);
	}

	if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	if (!aconnector->dc_sink) {
		struct dc_sink *dc_sink;
		struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
		const struct edid *edid;

		edid = drm_edid_raw(aconnector->drm_edid); // FIXME: Get rid of drm_edid_raw()
		dc_sink = dc_link_add_remote_sink(
			aconnector->dc_link,
			(uint8_t *)edid,
			(edid->extensions + 1) * EDID_LENGTH,
			&init_params);

		if (!dc_sink) {
			DRM_ERROR("Unable to add a remote sink\n");
			return 0;
		}

		drm_dbg_dp(connector->dev,
			   "DM_MST: add remote sink 0x%p, %d remaining\n",
			   dc_sink, aconnector->dc_link->sink_count);

		dc_sink->priv = aconnector;
		/* dc_link_add_remote_sink returns a new reference */
		aconnector->dc_sink = dc_sink;

		/* When a display is unplugged from the MST hub, the connector
		 * will be destroyed within dm_dp_mst_connector_destroy and its
		 * HDCP properties, like type, undesired, desired, enabled,
		 * will be lost. So, save the HDCP properties into hdcp_work
		 * within amdgpu_dm_atomic_commit_tail.
if the same display is 456 * plugged back with same display index, its hdcp properties 457 * will be retrieved from hdcp_work within dm_dp_mst_get_modes 458 */ 459 if (aconnector->dc_sink && connector->state) { 460 struct drm_device *dev = connector->dev; 461 struct amdgpu_device *adev = drm_to_adev(dev); 462 463 if (adev->dm.hdcp_workqueue) { 464 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; 465 struct hdcp_workqueue *hdcp_w = 466 &hdcp_work[aconnector->dc_link->link_index]; 467 468 connector->state->hdcp_content_type = 469 hdcp_w->hdcp_content_type[connector->index]; 470 connector->state->content_protection = 471 hdcp_w->content_protection[connector->index]; 472 } 473 } 474 475 if (aconnector->dc_sink) { 476 amdgpu_dm_update_freesync_caps( 477 connector, aconnector->drm_edid); 478 479 #if defined(CONFIG_DRM_AMD_DC_FP) 480 if (!validate_dsc_caps_on_connector(aconnector)) 481 memset(&aconnector->dc_sink->dsc_caps, 482 0, sizeof(aconnector->dc_sink->dsc_caps)); 483 #endif 484 485 if (!retrieve_downstream_port_device(aconnector)) 486 memset(&aconnector->mst_downstream_port_present, 487 0, sizeof(aconnector->mst_downstream_port_present)); 488 } 489 } 490 491 drm_edid_connector_update(&aconnector->base, aconnector->drm_edid); 492 493 ret = drm_edid_connector_add_modes(connector); 494 495 return ret; 496 } 497 498 static struct drm_encoder * 499 dm_mst_atomic_best_encoder(struct drm_connector *connector, 500 struct drm_atomic_state *state) 501 { 502 struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, 503 connector); 504 struct amdgpu_device *adev = drm_to_adev(connector->dev); 505 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); 506 507 return &adev->dm.mst_encoders[acrtc->crtc_id].base; 508 } 509 510 static int 511 dm_dp_mst_detect(struct drm_connector *connector, 512 struct drm_modeset_acquire_ctx *ctx, bool force) 513 { 514 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 515 struct amdgpu_dm_connector *master = aconnector->mst_root; 516 struct drm_dp_mst_port *port = aconnector->mst_output_port; 517 int connection_status; 518 519 if (drm_connector_is_unregistered(connector)) 520 return connector_status_disconnected; 521 522 connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, 523 aconnector->mst_output_port); 524 525 if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) { 526 uint8_t dpcd_rev; 527 int ret; 528 529 ret = drm_dp_dpcd_readb(&port->aux, DP_DP13_DPCD_REV, &dpcd_rev); 530 531 if (ret == 1) { 532 port->dpcd_rev = dpcd_rev; 533 534 /* Could be DP1.2 DP Rx case*/ 535 if (!dpcd_rev) { 536 ret = drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &dpcd_rev); 537 538 if (ret == 1) 539 port->dpcd_rev = dpcd_rev; 540 } 541 542 if (!dpcd_rev) 543 DRM_DEBUG_KMS("Can't decide DPCD revision number!"); 544 } 545 546 /* 547 * Could be legacy sink, logical port etc on DP1.2. 548 * Will get Nack under these cases when issue remote 549 * DPCD read. 
550 */ 551 if (ret != 1) 552 DRM_DEBUG_KMS("Can't access DPCD"); 553 } else if (port->pdt == DP_PEER_DEVICE_NONE) { 554 port->dpcd_rev = 0; 555 } 556 557 /* 558 * Release dc_sink for connector which unplug event is notified by CSN msg 559 */ 560 if (connection_status == connector_status_disconnected && aconnector->dc_sink) { 561 if (aconnector->dc_link->sink_count) 562 dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); 563 564 drm_dbg_dp(connector->dev, 565 "DM_MST: remove remote sink 0x%p, %d remaining\n", 566 aconnector->dc_link, 567 aconnector->dc_link->sink_count); 568 569 dc_sink_release(aconnector->dc_sink); 570 aconnector->dc_sink = NULL; 571 amdgpu_dm_mst_reset_mst_connector_setting(aconnector); 572 573 amdgpu_dm_set_mst_status(&aconnector->mst_status, 574 MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD, 575 false); 576 } 577 578 return connection_status; 579 } 580 581 static int dm_dp_mst_atomic_check(struct drm_connector *connector, 582 struct drm_atomic_state *state) 583 { 584 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 585 struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_root->mst_mgr; 586 struct drm_dp_mst_port *mst_port = aconnector->mst_output_port; 587 588 return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port); 589 } 590 591 static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { 592 .get_modes = dm_dp_mst_get_modes, 593 .mode_valid = amdgpu_dm_connector_mode_valid, 594 .atomic_best_encoder = dm_mst_atomic_best_encoder, 595 .detect_ctx = dm_dp_mst_detect, 596 .atomic_check = dm_dp_mst_atomic_check, 597 }; 598 599 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 600 { 601 drm_encoder_cleanup(encoder); 602 } 603 604 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 605 .destroy = amdgpu_dm_encoder_destroy, 606 }; 607 608 void 609 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev) 610 { 611 struct drm_device *dev = adev_to_drm(adev); 612 int i; 613 614 for (i = 0; i < adev->dm.display_indexes_num; i++) { 615 struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i]; 616 struct drm_encoder *encoder = &amdgpu_encoder->base; 617 618 encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 619 620 drm_encoder_init( 621 dev, 622 &amdgpu_encoder->base, 623 &amdgpu_dm_encoder_funcs, 624 DRM_MODE_ENCODER_DPMST, 625 NULL); 626 627 drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs); 628 } 629 } 630 631 static struct drm_connector * 632 dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, 633 struct drm_dp_mst_port *port, 634 const char *pathprop) 635 { 636 struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); 637 struct drm_device *dev = master->base.dev; 638 struct amdgpu_device *adev = drm_to_adev(dev); 639 struct amdgpu_dm_connector *aconnector; 640 struct drm_connector *connector; 641 int i; 642 643 aconnector = kzalloc_obj(*aconnector); 644 if (!aconnector) 645 return NULL; 646 647 DRM_DEBUG_DRIVER("%s: Create aconnector 0x%p for port 0x%p\n", __func__, aconnector, port); 648 649 connector = &aconnector->base; 650 aconnector->mst_output_port = port; 651 aconnector->mst_root = master; 652 amdgpu_dm_set_mst_status(&aconnector->mst_status, 653 MST_PROBE, true); 654 655 if (drm_connector_dynamic_init( 656 dev, 657 connector, 658 &dm_dp_mst_connector_funcs, 659 DRM_MODE_CONNECTOR_DisplayPort, 660 NULL)) { 661 kfree(aconnector); 662 return NULL; 
663 } 664 drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs); 665 666 amdgpu_dm_connector_init_helper( 667 &adev->dm, 668 aconnector, 669 DRM_MODE_CONNECTOR_DisplayPort, 670 master->dc_link, 671 master->connector_id); 672 673 for (i = 0; i < adev->dm.display_indexes_num; i++) { 674 drm_connector_attach_encoder(&aconnector->base, 675 &adev->dm.mst_encoders[i].base); 676 } 677 678 connector->max_bpc_property = master->base.max_bpc_property; 679 if (connector->max_bpc_property) 680 drm_connector_attach_max_bpc_property(connector, 8, 16); 681 682 connector->vrr_capable_property = master->base.vrr_capable_property; 683 if (connector->vrr_capable_property) 684 drm_connector_attach_vrr_capable_property(connector); 685 686 drm_object_attach_property( 687 &connector->base, 688 dev->mode_config.path_property, 689 0); 690 drm_object_attach_property( 691 &connector->base, 692 dev->mode_config.tile_property, 693 0); 694 connector->colorspace_property = master->base.colorspace_property; 695 if (connector->colorspace_property) 696 drm_connector_attach_colorspace_property(connector); 697 698 drm_connector_set_path_property(connector, pathprop); 699 700 if (!retrieve_branch_specific_data(aconnector)) 701 aconnector->branch_ieee_oui = 0; 702 703 /* 704 * Initialize connector state before adding the connectror to drm and 705 * framebuffer lists 706 */ 707 amdgpu_dm_connector_funcs_reset(connector); 708 709 drm_dp_mst_get_port_malloc(port); 710 711 return connector; 712 } 713 714 void dm_handle_mst_sideband_msg_ready_event( 715 struct drm_dp_mst_topology_mgr *mgr, 716 enum mst_msg_ready_type msg_rdy_type) 717 { 718 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; 719 uint8_t dret; 720 bool new_irq_handled = false; 721 int dpcd_addr; 722 uint8_t dpcd_bytes_to_read; 723 const uint8_t max_process_count = 30; 724 uint8_t process_count = 0; 725 u8 retry; 726 struct amdgpu_dm_connector *aconnector = 727 container_of(mgr, struct amdgpu_dm_connector, mst_mgr); 728 729 730 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); 731 732 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { 733 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; 734 /* DPCD 0x200 - 0x201 for downstream IRQ */ 735 dpcd_addr = DP_SINK_COUNT; 736 } else { 737 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; 738 /* DPCD 0x2002 - 0x2005 for downstream IRQ */ 739 dpcd_addr = DP_SINK_COUNT_ESI; 740 } 741 742 mutex_lock(&aconnector->handle_mst_msg_ready); 743 744 while (process_count < max_process_count) { 745 u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {}; 746 747 process_count++; 748 749 dret = drm_dp_dpcd_read( 750 &aconnector->dm_dp_aux.aux, 751 dpcd_addr, 752 esi, 753 dpcd_bytes_to_read); 754 755 if (dret != dpcd_bytes_to_read) { 756 DRM_DEBUG_KMS("DPCD read and acked number is not as expected!"); 757 break; 758 } 759 760 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); 761 762 switch (msg_rdy_type) { 763 case DOWN_REP_MSG_RDY_EVENT: 764 /* Only handle DOWN_REP_MSG_RDY case*/ 765 esi[1] &= DP_DOWN_REP_MSG_RDY; 766 break; 767 case UP_REQ_MSG_RDY_EVENT: 768 /* Only handle UP_REQ_MSG_RDY case*/ 769 esi[1] &= DP_UP_REQ_MSG_RDY; 770 break; 771 default: 772 /* Handle both cases*/ 773 esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY); 774 break; 775 } 776 777 if (!esi[1]) 778 break; 779 780 /* handle MST irq */ 781 if (aconnector->mst_mgr.mst_state) 782 drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr, 783 esi, 784 ack, 785 &new_irq_handled); 
786 787 if (new_irq_handled) { 788 /* ACK at DPCD to notify down stream */ 789 for (retry = 0; retry < 3; retry++) { 790 ssize_t wret; 791 792 wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux, 793 dpcd_addr + 1, 794 ack[1]); 795 if (wret == 1) 796 break; 797 } 798 799 if (retry == 3) { 800 DRM_ERROR("Failed to ack MST event.\n"); 801 break; 802 } 803 804 drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr); 805 806 new_irq_handled = false; 807 } else { 808 break; 809 } 810 } 811 812 mutex_unlock(&aconnector->handle_mst_msg_ready); 813 814 if (process_count == max_process_count) 815 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); 816 } 817 818 static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr) 819 { 820 dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT); 821 } 822 823 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { 824 .add_connector = dm_dp_add_mst_connector, 825 .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready, 826 }; 827 828 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, 829 struct amdgpu_dm_connector *aconnector, 830 int link_index) 831 { 832 struct dc_link_settings max_link_enc_cap = {0}; 833 834 aconnector->dm_dp_aux.aux.name = 835 kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d", 836 link_index); 837 aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer; 838 aconnector->dm_dp_aux.aux.drm_dev = dm->ddev; 839 aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc; 840 841 drm_dp_aux_init(&aconnector->dm_dp_aux.aux); 842 drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, 843 &aconnector->base); 844 drm_dp_dpcd_set_probe(&aconnector->dm_dp_aux.aux, false); 845 846 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) 847 return; 848 849 dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap); 850 aconnector->mst_mgr.cbs = &dm_mst_cbs; 851 drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr, adev_to_drm(dm->adev), 852 &aconnector->dm_dp_aux.aux, 16, 4, aconnector->connector_id); 853 854 drm_connector_attach_dp_subconnector_property(&aconnector->base); 855 } 856 857 uint32_t dm_mst_get_pbn_divider(struct dc_link *link) 858 { 859 uint32_t pbn_div_x100; 860 uint64_t dividend, divisor; 861 862 if (!link) 863 return 0; 864 865 dividend = (uint64_t)dc_link_bandwidth_kbps(link, dc_link_get_link_cap(link)) * 100; 866 divisor = 8 * 1000 * 54; 867 868 pbn_div_x100 = div64_u64(dividend, divisor); 869 870 return dfixed_const(pbn_div_x100) / 100; 871 } 872 873 struct dsc_mst_fairness_params { 874 struct dc_crtc_timing *timing; 875 struct dc_sink *sink; 876 struct dc_dsc_bw_range bw_range; 877 bool compression_possible; 878 struct drm_dp_mst_port *port; 879 enum dsc_clock_force_state clock_force_enable; 880 uint32_t num_slices_h; 881 uint32_t num_slices_v; 882 uint32_t bpp_overwrite; 883 struct amdgpu_dm_connector *aconnector; 884 }; 885 886 #if defined(CONFIG_DRM_AMD_DC_FP) 887 static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link) 888 { 889 u8 link_coding_cap; 890 uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B; 891 892 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link); 893 if (link_coding_cap == DP_128b_132b_ENCODING) 894 fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B; 895 896 return fec_overhead_multiplier_x1000; 897 } 898 899 static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000) 900 { 901 u64 peak_kbps = kbps; 902 903 peak_kbps *= 1006; 904 peak_kbps *= 
fec_overhead_multiplier_x1000; 905 peak_kbps = div_u64(peak_kbps, 1000 * 1000); 906 return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); 907 } 908 909 static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, 910 struct dsc_mst_fairness_vars *vars, 911 int count, 912 int k) 913 { 914 struct drm_connector *drm_connector; 915 int i; 916 struct dc_dsc_config_options dsc_options = {0}; 917 918 for (i = 0; i < count; i++) { 919 drm_connector = ¶ms[i].aconnector->base; 920 921 dc_dsc_get_default_config_option(params[i].sink->ctx->dc, &dsc_options); 922 dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16; 923 924 memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg)); 925 if (vars[i + k].dsc_enabled && dc_dsc_compute_config( 926 params[i].sink->ctx->dc->res_pool->dscs[0], 927 ¶ms[i].sink->dsc_caps.dsc_dec_caps, 928 &dsc_options, 929 0, 930 params[i].timing, 931 dc_link_get_highest_encoding_format(params[i].aconnector->dc_link), 932 ¶ms[i].timing->dsc_cfg)) { 933 params[i].timing->flags.DSC = 1; 934 935 if (params[i].bpp_overwrite) 936 params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite; 937 else 938 params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16; 939 940 if (params[i].num_slices_h) 941 params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h; 942 943 if (params[i].num_slices_v) 944 params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v; 945 } else { 946 params[i].timing->flags.DSC = 0; 947 } 948 params[i].timing->dsc_cfg.mst_pbn = vars[i + k].pbn; 949 } 950 951 for (i = 0; i < count; i++) { 952 if (params[i].sink) { 953 if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL && 954 params[i].sink->sink_signal != SIGNAL_TYPE_NONE) 955 DRM_DEBUG_DRIVER("MST_DSC %s i=%d dispname=%s\n", __func__, i, 956 params[i].sink->edid_caps.display_name); 957 } 958 959 DRM_DEBUG_DRIVER("MST_DSC dsc=%d bits_per_pixel=%d pbn=%d\n", 960 params[i].timing->flags.DSC, 961 params[i].timing->dsc_cfg.bits_per_pixel, 962 vars[i + k].pbn); 963 } 964 } 965 966 static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) 967 { 968 struct dc_dsc_config dsc_config; 969 u64 kbps; 970 971 struct drm_connector *drm_connector = ¶m.aconnector->base; 972 struct dc_dsc_config_options dsc_options = {0}; 973 974 dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options); 975 dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16; 976 977 kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); 978 dc_dsc_compute_config( 979 param.sink->ctx->dc->res_pool->dscs[0], 980 ¶m.sink->dsc_caps.dsc_dec_caps, 981 &dsc_options, 982 (int) kbps, param.timing, 983 dc_link_get_highest_encoding_format(param.aconnector->dc_link), 984 &dsc_config); 985 986 return dsc_config.bits_per_pixel; 987 } 988 989 static int increase_dsc_bpp(struct drm_atomic_state *state, 990 struct drm_dp_mst_topology_state *mst_state, 991 struct dc_link *dc_link, 992 struct dsc_mst_fairness_params *params, 993 struct dsc_mst_fairness_vars *vars, 994 int count, 995 int k) 996 { 997 int i; 998 bool bpp_increased[MAX_PIPES]; 999 int initial_slack[MAX_PIPES]; 1000 int min_initial_slack; 1001 int next_index; 1002 int remaining_to_increase = 0; 1003 int link_timeslots_used; 1004 int fair_pbn_alloc; 1005 int ret = 0; 1006 uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); 1007 1008 for (i = 0; i < count; i++) { 1009 if (vars[i + k].dsc_enabled) { 1010 initial_slack[i] = 
1011 kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn; 1012 bpp_increased[i] = false; 1013 remaining_to_increase += 1; 1014 } else { 1015 initial_slack[i] = 0; 1016 bpp_increased[i] = true; 1017 } 1018 } 1019 1020 while (remaining_to_increase) { 1021 next_index = -1; 1022 min_initial_slack = -1; 1023 for (i = 0; i < count; i++) { 1024 if (!bpp_increased[i]) { 1025 if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) { 1026 min_initial_slack = initial_slack[i]; 1027 next_index = i; 1028 } 1029 } 1030 } 1031 1032 if (next_index == -1) 1033 break; 1034 1035 link_timeslots_used = 0; 1036 1037 for (i = 0; i < count; i++) 1038 link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, dfixed_trunc(mst_state->pbn_div)); 1039 1040 fair_pbn_alloc = 1041 (63 - link_timeslots_used) / remaining_to_increase * dfixed_trunc(mst_state->pbn_div); 1042 1043 if (initial_slack[next_index] > fair_pbn_alloc) { 1044 vars[next_index].pbn += fair_pbn_alloc; 1045 ret = drm_dp_atomic_find_time_slots(state, 1046 params[next_index].port->mgr, 1047 params[next_index].port, 1048 vars[next_index].pbn); 1049 if (ret < 0) 1050 return ret; 1051 1052 ret = drm_dp_mst_atomic_check(state); 1053 if (ret == 0) { 1054 vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); 1055 } else { 1056 vars[next_index].pbn -= fair_pbn_alloc; 1057 ret = drm_dp_atomic_find_time_slots(state, 1058 params[next_index].port->mgr, 1059 params[next_index].port, 1060 vars[next_index].pbn); 1061 if (ret < 0) 1062 return ret; 1063 } 1064 } else { 1065 vars[next_index].pbn += initial_slack[next_index]; 1066 ret = drm_dp_atomic_find_time_slots(state, 1067 params[next_index].port->mgr, 1068 params[next_index].port, 1069 vars[next_index].pbn); 1070 if (ret < 0) 1071 return ret; 1072 1073 ret = drm_dp_mst_atomic_check(state); 1074 if (ret == 0) { 1075 vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; 1076 } else { 1077 vars[next_index].pbn -= initial_slack[next_index]; 1078 ret = drm_dp_atomic_find_time_slots(state, 1079 params[next_index].port->mgr, 1080 params[next_index].port, 1081 vars[next_index].pbn); 1082 if (ret < 0) 1083 return ret; 1084 } 1085 } 1086 1087 bpp_increased[next_index] = true; 1088 remaining_to_increase--; 1089 } 1090 return 0; 1091 } 1092 1093 static int try_disable_dsc(struct drm_atomic_state *state, 1094 struct dc_link *dc_link, 1095 struct dsc_mst_fairness_params *params, 1096 struct dsc_mst_fairness_vars *vars, 1097 int count, 1098 int k) 1099 { 1100 int i; 1101 bool tried[MAX_PIPES]; 1102 int kbps_increase[MAX_PIPES]; 1103 int max_kbps_increase; 1104 int next_index; 1105 int remaining_to_try = 0; 1106 int ret; 1107 uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); 1108 int var_pbn; 1109 1110 for (i = 0; i < count; i++) { 1111 if (vars[i + k].dsc_enabled 1112 && vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16 1113 && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) { 1114 kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps; 1115 tried[i] = false; 1116 remaining_to_try += 1; 1117 } else { 1118 kbps_increase[i] = 0; 1119 tried[i] = true; 1120 } 1121 } 1122 1123 while (remaining_to_try) { 1124 next_index = -1; 1125 max_kbps_increase = -1; 1126 for (i = 0; i < count; i++) { 1127 if (!tried[i]) { 1128 if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) { 1129 max_kbps_increase = kbps_increase[i]; 1130 next_index = i; 1131 } 1132 } 
1133 } 1134 1135 if (next_index == -1) 1136 break; 1137 1138 DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index); 1139 var_pbn = vars[next_index].pbn; 1140 vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); 1141 ret = drm_dp_atomic_find_time_slots(state, 1142 params[next_index].port->mgr, 1143 params[next_index].port, 1144 vars[next_index].pbn); 1145 if (ret < 0) { 1146 DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n", 1147 __func__, __LINE__, next_index, ret); 1148 vars[next_index].pbn = var_pbn; 1149 return ret; 1150 } 1151 1152 ret = drm_dp_mst_atomic_check(state); 1153 if (ret == 0) { 1154 DRM_DEBUG_DRIVER("MST_DSC index #%d, greedily disable dsc\n", next_index); 1155 vars[next_index].dsc_enabled = false; 1156 vars[next_index].bpp_x16 = 0; 1157 } else { 1158 DRM_DEBUG_DRIVER("MST_DSC index #%d, restore optimized pbn value\n", next_index); 1159 vars[next_index].pbn = var_pbn; 1160 ret = drm_dp_atomic_find_time_slots(state, 1161 params[next_index].port->mgr, 1162 params[next_index].port, 1163 vars[next_index].pbn); 1164 if (ret < 0) { 1165 DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n", 1166 __func__, __LINE__, next_index, ret); 1167 return ret; 1168 } 1169 } 1170 1171 tried[next_index] = true; 1172 remaining_to_try--; 1173 } 1174 return 0; 1175 } 1176 1177 static void log_dsc_params(int count, struct dsc_mst_fairness_vars *vars, int k) 1178 { 1179 int i; 1180 1181 for (i = 0; i < count; i++) 1182 DRM_DEBUG_DRIVER("MST_DSC DSC params: stream #%d --- dsc_enabled = %d, bpp_x16 = %d, pbn = %d\n", 1183 i, vars[i + k].dsc_enabled, vars[i + k].bpp_x16, vars[i + k].pbn); 1184 } 1185 1186 static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, 1187 struct dc_state *dc_state, 1188 struct dc_link *dc_link, 1189 struct dsc_mst_fairness_vars *vars, 1190 struct drm_dp_mst_topology_mgr *mgr, 1191 int *link_vars_start_index) 1192 { 1193 struct dc_stream_state *stream; 1194 struct dsc_mst_fairness_params params[MAX_PIPES]; 1195 struct amdgpu_dm_connector *aconnector; 1196 struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr); 1197 int count = 0; 1198 int i, k, ret; 1199 bool debugfs_overwrite = false; 1200 uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); 1201 struct drm_connector_state *new_conn_state; 1202 1203 memset(params, 0, sizeof(params)); 1204 1205 if (IS_ERR(mst_state)) 1206 return PTR_ERR(mst_state); 1207 1208 /* Set up params */ 1209 DRM_DEBUG_DRIVER("%s: MST_DSC Try to set up params from %d streams\n", __func__, dc_state->stream_count); 1210 for (i = 0; i < dc_state->stream_count; i++) { 1211 struct dc_dsc_policy dsc_policy = {0}; 1212 1213 stream = dc_state->streams[i]; 1214 1215 if (stream->link != dc_link) 1216 continue; 1217 1218 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 1219 if (!aconnector) 1220 continue; 1221 1222 if (!aconnector->mst_output_port) 1223 continue; 1224 1225 new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); 1226 1227 if (!new_conn_state) { 1228 DRM_DEBUG_DRIVER("%s:%d MST_DSC Skip the stream 0x%p with invalid new_conn_state\n", 1229 __func__, __LINE__, stream); 1230 continue; 1231 } 1232 1233 stream->timing.flags.DSC = 0; 1234 1235 params[count].timing = &stream->timing; 1236 params[count].sink = stream->sink; 1237 params[count].aconnector = aconnector; 1238 params[count].port = 
aconnector->mst_output_port; 1239 params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; 1240 if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE) 1241 debugfs_overwrite = true; 1242 params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; 1243 params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; 1244 params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel; 1245 params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; 1246 dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link)); 1247 if (!dc_dsc_compute_bandwidth_range( 1248 stream->sink->ctx->dc->res_pool->dscs[0], 1249 stream->sink->ctx->dc->debug.dsc_min_slice_height_override, 1250 dsc_policy.min_target_bpp * 16, 1251 dsc_policy.max_target_bpp * 16, 1252 &stream->sink->dsc_caps.dsc_dec_caps, 1253 &stream->timing, 1254 dc_link_get_highest_encoding_format(dc_link), 1255 ¶ms[count].bw_range)) 1256 params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, 1257 dc_link_get_highest_encoding_format(dc_link)); 1258 1259 DRM_DEBUG_DRIVER("MST_DSC #%d stream 0x%p - max_kbps = %u, min_kbps = %u, uncompressed_kbps = %u\n", 1260 count, stream, params[count].bw_range.max_kbps, params[count].bw_range.min_kbps, 1261 params[count].bw_range.stream_kbps); 1262 count++; 1263 } 1264 1265 DRM_DEBUG_DRIVER("%s: MST_DSC Params set up for %d streams\n", __func__, count); 1266 1267 if (count == 0) { 1268 ASSERT(0); 1269 return 0; 1270 } 1271 1272 /* k is start index of vars for current phy link used by mst hub */ 1273 k = *link_vars_start_index; 1274 /* set vars start index for next mst hub phy link */ 1275 *link_vars_start_index += count; 1276 1277 /* Try no compression */ 1278 DRM_DEBUG_DRIVER("MST_DSC Try no compression\n"); 1279 for (i = 0; i < count; i++) { 1280 vars[i + k].aconnector = params[i].aconnector; 1281 vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); 1282 vars[i + k].dsc_enabled = false; 1283 vars[i + k].bpp_x16 = 0; 1284 ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, 1285 vars[i + k].pbn); 1286 if (ret < 0) 1287 return ret; 1288 } 1289 ret = drm_dp_mst_atomic_check(state); 1290 if (ret == 0 && !debugfs_overwrite) { 1291 set_dsc_configs_from_fairness_vars(params, vars, count, k); 1292 return 0; 1293 } else if (ret != -ENOSPC) { 1294 return ret; 1295 } 1296 1297 log_dsc_params(count, vars, k); 1298 1299 /* Try max compression */ 1300 DRM_DEBUG_DRIVER("MST_DSC Try max compression\n"); 1301 for (i = 0; i < count; i++) { 1302 if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { 1303 vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000); 1304 vars[i + k].dsc_enabled = true; 1305 vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; 1306 ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, 1307 params[i].port, vars[i + k].pbn); 1308 if (ret < 0) 1309 return ret; 1310 } else { 1311 vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); 1312 vars[i + k].dsc_enabled = false; 1313 vars[i + k].bpp_x16 = 0; 1314 ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, 1315 params[i].port, vars[i + k].pbn); 1316 if (ret < 0) 1317 return ret; 1318 } 1319 } 1320 ret = drm_dp_mst_atomic_check(state); 1321 if (ret != 0) 1322 
return ret; 1323 1324 log_dsc_params(count, vars, k); 1325 1326 /* Optimize degree of compression */ 1327 DRM_DEBUG_DRIVER("MST_DSC Try optimize compression\n"); 1328 ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k); 1329 if (ret < 0) { 1330 DRM_DEBUG_DRIVER("MST_DSC Failed to optimize compression\n"); 1331 return ret; 1332 } 1333 1334 log_dsc_params(count, vars, k); 1335 1336 DRM_DEBUG_DRIVER("MST_DSC Try disable compression\n"); 1337 ret = try_disable_dsc(state, dc_link, params, vars, count, k); 1338 if (ret < 0) { 1339 DRM_DEBUG_DRIVER("MST_DSC Failed to disable compression\n"); 1340 return ret; 1341 } 1342 1343 log_dsc_params(count, vars, k); 1344 1345 set_dsc_configs_from_fairness_vars(params, vars, count, k); 1346 1347 return 0; 1348 } 1349 1350 static bool is_dsc_need_re_compute( 1351 struct drm_atomic_state *state, 1352 struct dc_state *dc_state, 1353 struct dc_link *dc_link) 1354 { 1355 int i, j; 1356 bool is_dsc_need_re_compute = false; 1357 struct amdgpu_dm_connector *stream_on_link[MAX_PIPES]; 1358 int new_stream_on_link_num = 0; 1359 struct amdgpu_dm_connector *aconnector; 1360 struct dc_stream_state *stream; 1361 const struct dc *dc = dc_link->dc; 1362 1363 /* only check phy used by dsc mst branch */ 1364 if (dc_link->type != dc_connection_mst_branch) 1365 goto out; 1366 1367 /* add a check for older MST DSC with no virtual DPCDs */ 1368 if (needs_dsc_aux_workaround(dc_link) && 1369 (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || 1370 dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))) 1371 goto out; 1372 1373 for (i = 0; i < MAX_PIPES; i++) 1374 stream_on_link[i] = NULL; 1375 1376 DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in new dc_state\n", __func__, dc_state->stream_count); 1377 1378 /* check if there is mode change in new request */ 1379 for (i = 0; i < dc_state->stream_count; i++) { 1380 struct drm_crtc_state *new_crtc_state; 1381 struct drm_connector_state *new_conn_state; 1382 1383 stream = dc_state->streams[i]; 1384 if (!stream) 1385 continue; 1386 1387 DRM_DEBUG_DRIVER("%s:%d MST_DSC checking #%d stream 0x%p\n", __func__, __LINE__, i, stream); 1388 1389 /* check if stream using the same link for mst */ 1390 if (stream->link != dc_link) 1391 continue; 1392 1393 aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; 1394 if (!aconnector) 1395 continue; 1396 1397 stream_on_link[new_stream_on_link_num] = aconnector; 1398 new_stream_on_link_num++; 1399 1400 new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); 1401 if (!new_conn_state) { 1402 DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_conn_state for stream 0x%p, aconnector 0x%p\n", 1403 __func__, __LINE__, stream, aconnector); 1404 continue; 1405 } 1406 1407 if (IS_ERR(new_conn_state)) 1408 continue; 1409 1410 if (!new_conn_state->crtc) 1411 continue; 1412 1413 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); 1414 if (!new_crtc_state) { 1415 DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_crtc_state for crtc of stream 0x%p, aconnector 0x%p\n", 1416 __func__, __LINE__, stream, aconnector); 1417 continue; 1418 } 1419 1420 if (IS_ERR(new_crtc_state)) 1421 continue; 1422 1423 if (new_crtc_state->enable && new_crtc_state->active) { 1424 if (new_crtc_state->mode_changed || new_crtc_state->active_changed || 1425 new_crtc_state->connectors_changed) { 1426 DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required." 
1427 "stream 0x%p in new dc_state\n", 1428 __func__, __LINE__, stream); 1429 is_dsc_need_re_compute = true; 1430 goto out; 1431 } 1432 } 1433 } 1434 1435 if (new_stream_on_link_num == 0) { 1436 DRM_DEBUG_DRIVER("%s:%d MST_DSC no mode change request for streams in new dc_state\n", 1437 __func__, __LINE__); 1438 is_dsc_need_re_compute = false; 1439 goto out; 1440 } 1441 1442 DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in current dc_state\n", 1443 __func__, dc->current_state->stream_count); 1444 1445 /* check current_state if there stream on link but it is not in 1446 * new request state 1447 */ 1448 for (i = 0; i < dc->current_state->stream_count; i++) { 1449 stream = dc->current_state->streams[i]; 1450 /* only check stream on the mst hub */ 1451 if (stream->link != dc_link) 1452 continue; 1453 1454 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 1455 if (!aconnector) 1456 continue; 1457 1458 for (j = 0; j < new_stream_on_link_num; j++) { 1459 if (stream_on_link[j]) { 1460 if (aconnector == stream_on_link[j]) 1461 break; 1462 } 1463 } 1464 1465 if (j == new_stream_on_link_num) { 1466 /* not in new state */ 1467 DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required." 1468 "stream 0x%p in current dc_state but not in new dc_state\n", 1469 __func__, __LINE__, stream); 1470 is_dsc_need_re_compute = true; 1471 break; 1472 } 1473 } 1474 1475 out: 1476 DRM_DEBUG_DRIVER("%s: MST_DSC dsc recompute %s\n", 1477 __func__, is_dsc_need_re_compute ? "required" : "not required"); 1478 1479 return is_dsc_need_re_compute; 1480 } 1481 1482 int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, 1483 struct dc_state *dc_state, 1484 struct dsc_mst_fairness_vars *vars) 1485 { 1486 int i, j; 1487 struct dc_stream_state *stream; 1488 bool computed_streams[MAX_PIPES]; 1489 struct amdgpu_dm_connector *aconnector; 1490 struct drm_dp_mst_topology_mgr *mst_mgr; 1491 struct resource_pool *res_pool; 1492 int link_vars_start_index = 0; 1493 int ret = 0; 1494 1495 for (i = 0; i < dc_state->stream_count; i++) 1496 computed_streams[i] = false; 1497 1498 for (i = 0; i < dc_state->stream_count; i++) { 1499 stream = dc_state->streams[i]; 1500 res_pool = stream->ctx->dc->res_pool; 1501 1502 if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) 1503 continue; 1504 1505 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 1506 1507 DRM_DEBUG_DRIVER("%s: MST_DSC compute mst dsc configs for stream 0x%p, aconnector 0x%p\n", 1508 __func__, stream, aconnector); 1509 1510 if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) 1511 continue; 1512 1513 if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) 1514 continue; 1515 1516 if (computed_streams[i]) 1517 continue; 1518 1519 if (res_pool->funcs->remove_stream_from_ctx && 1520 res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) 1521 return -EINVAL; 1522 1523 if (!is_dsc_need_re_compute(state, dc_state, stream->link)) 1524 continue; 1525 1526 mst_mgr = aconnector->mst_output_port->mgr; 1527 ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr, 1528 &link_vars_start_index); 1529 if (ret != 0) 1530 return ret; 1531 1532 for (j = 0; j < dc_state->stream_count; j++) { 1533 if (dc_state->streams[j]->link == stream->link) 1534 computed_streams[j] = true; 1535 } 1536 } 1537 1538 for (i = 0; i < dc_state->stream_count; i++) { 1539 stream = dc_state->streams[i]; 1540 1541 if (stream->timing.flags.DSC == 1) 1542 if 
(dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) { 1543 DRM_DEBUG_DRIVER("%s:%d MST_DSC Failed to request dsc hw resource for stream 0x%p\n", 1544 __func__, __LINE__, stream); 1545 return -EINVAL; 1546 } 1547 } 1548 1549 return ret; 1550 } 1551 1552 static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, 1553 struct dc_state *dc_state, 1554 struct dsc_mst_fairness_vars *vars) 1555 { 1556 int i, j; 1557 struct dc_stream_state *stream; 1558 bool computed_streams[MAX_PIPES]; 1559 struct amdgpu_dm_connector *aconnector; 1560 struct drm_dp_mst_topology_mgr *mst_mgr; 1561 int link_vars_start_index = 0; 1562 int ret = 0; 1563 1564 for (i = 0; i < dc_state->stream_count; i++) 1565 computed_streams[i] = false; 1566 1567 for (i = 0; i < dc_state->stream_count; i++) { 1568 stream = dc_state->streams[i]; 1569 1570 if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) 1571 continue; 1572 1573 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 1574 1575 DRM_DEBUG_DRIVER("MST_DSC pre compute mst dsc configs for #%d stream 0x%p, aconnector 0x%p\n", 1576 i, stream, aconnector); 1577 1578 if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) 1579 continue; 1580 1581 if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) 1582 continue; 1583 1584 if (computed_streams[i]) 1585 continue; 1586 1587 if (!is_dsc_need_re_compute(state, dc_state, stream->link)) 1588 continue; 1589 1590 mst_mgr = aconnector->mst_output_port->mgr; 1591 ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr, 1592 &link_vars_start_index); 1593 if (ret != 0) 1594 return ret; 1595 1596 for (j = 0; j < dc_state->stream_count; j++) { 1597 if (dc_state->streams[j]->link == stream->link) 1598 computed_streams[j] = true; 1599 } 1600 } 1601 1602 return ret; 1603 } 1604 1605 static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state, 1606 struct dc_stream_state *stream) 1607 { 1608 int i; 1609 struct drm_crtc *crtc; 1610 struct drm_crtc_state *new_state, *old_state; 1611 1612 for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, i) { 1613 struct dm_crtc_state *dm_state = to_dm_crtc_state(new_state); 1614 1615 if (dm_state->stream == stream) 1616 return i; 1617 } 1618 return -1; 1619 } 1620 1621 static bool is_link_to_dschub(struct dc_link *dc_link) 1622 { 1623 union dpcd_dsc_basic_capabilities *dsc_caps = 1624 &dc_link->dpcd_caps.dsc_caps.dsc_basic_caps; 1625 1626 /* only check phy used by dsc mst branch */ 1627 if (dc_link->type != dc_connection_mst_branch) 1628 return false; 1629 1630 if (!(dsc_caps->fields.dsc_support.DSC_SUPPORT || 1631 dsc_caps->fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) 1632 return false; 1633 return true; 1634 } 1635 1636 static bool is_dsc_precompute_needed(struct drm_atomic_state *state) 1637 { 1638 int i; 1639 struct drm_crtc *crtc; 1640 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 1641 bool ret = false; 1642 1643 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 1644 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(new_crtc_state); 1645 1646 if (!amdgpu_dm_find_first_crtc_matching_connector(state, crtc)) { 1647 ret = false; 1648 break; 1649 } 1650 if (dm_crtc_state->stream && dm_crtc_state->stream->link) 1651 if (is_link_to_dschub(dm_crtc_state->stream->link)) 1652 ret = true; 1653 } 1654 return ret; 1655 } 1656 1657 int pre_validate_dsc(struct drm_atomic_state *state, 1658 struct dm_atomic_state **dm_state_ptr, 
		     struct dsc_mst_fairness_vars *vars)
{
	int i;
	struct dm_atomic_state *dm_state;
	struct dc_state *local_dc_state = NULL;
	int ret = 0;

	if (!is_dsc_precompute_needed(state)) {
		DRM_INFO_ONCE("%s:%d MST_DSC dsc precompute is not needed\n", __func__, __LINE__);
		return 0;
	}
	ret = dm_atomic_get_state(state, dm_state_ptr);
	if (ret != 0) {
		DRM_INFO_ONCE("%s:%d MST_DSC dm_atomic_get_state() failed\n", __func__, __LINE__);
		return ret;
	}
	dm_state = *dm_state_ptr;

	/*
	 * Create a local copy of dc_state and copy the streams of
	 * dm_state->context into it. Make sure the stream pointers of the
	 * local copy are not the same as the streams from dm_state->context.
	 */

	local_dc_state = vmalloc(sizeof(struct dc_state));
	if (!local_dc_state)
		return -ENOMEM;
	memcpy(local_dc_state, dm_state->context, sizeof(struct dc_state));

	for (i = 0; i < local_dc_state->stream_count; i++) {
		struct dc_stream_state *stream = dm_state->context->streams[i];
		int ind = find_crtc_index_in_state_by_stream(state, stream);

		if (ind >= 0) {
			struct drm_connector *connector;
			struct drm_connector_state *drm_new_conn_state;
			struct dm_connector_state *dm_new_conn_state;
			struct dm_crtc_state *dm_old_crtc_state;

			connector =
				amdgpu_dm_find_first_crtc_matching_connector(state,
									     state->crtcs[ind].ptr);
			if (!connector)
				continue;

			drm_new_conn_state =
				drm_atomic_get_new_connector_state(state,
								   connector);
			dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
			dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);

			local_dc_state->streams[i] =
				create_validate_stream_for_sink(connector,
								&state->crtcs[ind].new_state->mode,
								dm_new_conn_state,
								dm_old_crtc_state->stream);
			if (local_dc_state->streams[i] == NULL) {
				ret = -EINVAL;
				break;
			}
		}
	}

	if (ret != 0)
		goto clean_exit;

	ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
	if (ret != 0) {
		DRM_INFO_ONCE("%s:%d MST_DSC dsc pre_compute_mst_dsc_configs_for_state() failed\n",
			      __func__, __LINE__);
		ret = -EINVAL;
		goto clean_exit;
	}

	/*
	 * Compare the timing of local_dc_state->streams with dm_state->context;
	 * if they are the same, clear crtc_state->mode_changed.
	 */
	for (i = 0; i < local_dc_state->stream_count; i++) {
		struct dc_stream_state *stream = dm_state->context->streams[i];

		if (local_dc_state->streams[i] &&
		    dc_is_timing_changed(stream, local_dc_state->streams[i])) {
			DRM_INFO_ONCE("%s:%d MST_DSC crtc[%d] needs mode_change\n", __func__, __LINE__, i);
		} else {
			int ind = find_crtc_index_in_state_by_stream(state, stream);

			if (ind >= 0) {
				struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(state->crtcs[ind].new_state);

				DRM_INFO_ONCE("%s:%d MST_DSC no mode changed for stream 0x%p\n",
					      __func__, __LINE__, stream);
				dm_new_crtc_state->base.mode_changed = dm_new_crtc_state->mode_changed_independent_from_dsc;
			}
		}
	}
clean_exit:
	for (i = 0; i < local_dc_state->stream_count; i++) {
		struct dc_stream_state *stream = dm_state->context->streams[i];

		if (local_dc_state->streams[i] != stream)
			dc_stream_release(local_dc_state->streams[i]);
	}

	vfree(local_dc_state);

	return ret;
}

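/*
 * Note (descriptive comment, not from the original source) on the PBN
 * arithmetic used by kbps_from_pbn() below: one PBN unit corresponds to
 * 54/64 MBps of link bandwidth, and allocations carry a 1.006
 * (PEAK_FACTOR_X1000) margin, so the conversion back to kbps is
 * approximately
 *
 *   kbps = pbn * (1000000 / 1006) * 8 * 54 / 64
 *
 * As a sanity check, a 4-lane HBR2 link (2160 MBps of data bandwidth)
 * corresponds to 2560 PBN.
 */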
1768 static uint32_t kbps_from_pbn(unsigned int pbn) 1769 { 1770 uint64_t kbps = (uint64_t)pbn; 1771 1772 kbps *= (1000000 / PEAK_FACTOR_X1000); 1773 kbps *= 8; 1774 kbps *= 54; 1775 kbps /= 64; 1776 1777 return (uint32_t)kbps; 1778 } 1779 1780 static bool is_dsc_common_config_possible(struct dc_stream_state *stream, 1781 struct dc_dsc_bw_range *bw_range) 1782 { 1783 struct dc_dsc_policy dsc_policy = {0}; 1784 bool is_dsc_possible; 1785 1786 dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link)); 1787 is_dsc_possible = dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0], 1788 stream->sink->ctx->dc->debug.dsc_min_slice_height_override, 1789 dsc_policy.min_target_bpp * 16, 1790 dsc_policy.max_target_bpp * 16, 1791 &stream->sink->dsc_caps.dsc_dec_caps, 1792 &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range); 1793 1794 return is_dsc_possible; 1795 } 1796 #endif 1797 1798 #if defined(CONFIG_DRM_AMD_DC_FP) 1799 static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_link_bw) 1800 { 1801 uint32_t total_data_bw_efficiency_x10000 = 0; 1802 uint32_t link_rate_per_lane_kbps = 0; 1803 enum dc_link_rate link_rate; 1804 union lane_count_set lane_count; 1805 u8 dp_link_encoding; 1806 u8 link_bw_set = 0; 1807 u8 data[16] = {0}; 1808 1809 *cur_link_bw = 0; 1810 1811 if (drm_dp_dpcd_read(aux, DP_LINK_BW_SET, data, 16) != 16) 1812 return false; 1813 1814 dp_link_encoding = data[DP_MAIN_LINK_CHANNEL_CODING_SET - DP_LINK_BW_SET]; 1815 link_bw_set = data[DP_LINK_BW_SET - DP_LINK_BW_SET]; 1816 lane_count.raw = data[DP_LANE_COUNT_SET - DP_LINK_BW_SET]; 1817 1818 drm_dbg_dp(aux->drm_dev, "MST_DSC downlink setting: %d, 0x%x x %d\n", 1819 dp_link_encoding, link_bw_set, lane_count.bits.LANE_COUNT_SET); 1820 1821 switch (dp_link_encoding) { 1822 case DP_8b_10b_ENCODING: 1823 link_rate = link_bw_set; 1824 link_rate_per_lane_kbps = link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE; 1825 total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000; 1826 total_data_bw_efficiency_x10000 /= 100; 1827 total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100; 1828 break; 1829 case DP_128b_132b_ENCODING: 1830 switch (link_bw_set) { 1831 case DP_LINK_BW_10: 1832 link_rate = LINK_RATE_UHBR10; 1833 break; 1834 case DP_LINK_BW_13_5: 1835 link_rate = LINK_RATE_UHBR13_5; 1836 break; 1837 case DP_LINK_BW_20: 1838 link_rate = LINK_RATE_UHBR20; 1839 break; 1840 default: 1841 return false; 1842 } 1843 1844 link_rate_per_lane_kbps = link_rate * 10000; 1845 total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000; 1846 break; 1847 default: 1848 return false; 1849 } 1850 1851 *cur_link_bw = link_rate_per_lane_kbps * lane_count.bits.LANE_COUNT_SET / 10000 * total_data_bw_efficiency_x10000; 1852 return true; 1853 } 1854 #endif 1855 1856 enum dc_status dm_dp_mst_is_port_support_mode( 1857 struct amdgpu_dm_connector *aconnector, 1858 struct dc_stream_state *stream) 1859 { 1860 #if defined(CONFIG_DRM_AMD_DC_FP) 1861 int branch_max_throughput_mps = 0; 1862 struct dc_link_settings cur_link_settings; 1863 uint32_t end_to_end_bw_in_kbps = 0; 1864 uint32_t root_link_bw_in_kbps = 0; 1865 uint32_t virtual_channel_bw_in_kbps = 0; 1866 struct dc_dsc_bw_range bw_range = {0}; 1867 struct dc_dsc_config_options dsc_options = {0}; 1868 uint32_t stream_kbps; 1869 1870 /* DSC unnecessary case 1871 * Check if timing could be supported within end-to-end BW 1872 */ 1873 stream_kbps = 1874 
static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_link_bw)
{
	uint32_t total_data_bw_efficiency_x10000 = 0;
	uint32_t link_rate_per_lane_kbps = 0;
	enum dc_link_rate link_rate;
	union lane_count_set lane_count;
	u8 dp_link_encoding;
	u8 link_bw_set = 0;
	u8 data[16] = {0};

	*cur_link_bw = 0;

	if (drm_dp_dpcd_read(aux, DP_LINK_BW_SET, data, 16) != 16)
		return false;

	dp_link_encoding = data[DP_MAIN_LINK_CHANNEL_CODING_SET - DP_LINK_BW_SET];
	link_bw_set = data[DP_LINK_BW_SET - DP_LINK_BW_SET];
	lane_count.raw = data[DP_LANE_COUNT_SET - DP_LINK_BW_SET];

	drm_dbg_dp(aux->drm_dev, "MST_DSC downlink setting: %d, 0x%x x %d\n",
		   dp_link_encoding, link_bw_set, lane_count.bits.LANE_COUNT_SET);

	switch (dp_link_encoding) {
	case DP_8b_10b_ENCODING:
		link_rate = link_bw_set;
		link_rate_per_lane_kbps = link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
		total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
		total_data_bw_efficiency_x10000 /= 100;
		total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
		break;
	case DP_128b_132b_ENCODING:
		switch (link_bw_set) {
		case DP_LINK_BW_10:
			link_rate = LINK_RATE_UHBR10;
			break;
		case DP_LINK_BW_13_5:
			link_rate = LINK_RATE_UHBR13_5;
			break;
		case DP_LINK_BW_20:
			link_rate = LINK_RATE_UHBR20;
			break;
		default:
			return false;
		}

		link_rate_per_lane_kbps = link_rate * 10000;
		total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000;
		break;
	default:
		return false;
	}

	*cur_link_bw = link_rate_per_lane_kbps * lane_count.bits.LANE_COUNT_SET / 10000 * total_data_bw_efficiency_x10000;
	return true;
}
#endif

enum dc_status dm_dp_mst_is_port_support_mode(
	struct amdgpu_dm_connector *aconnector,
	struct dc_stream_state *stream)
{
#if defined(CONFIG_DRM_AMD_DC_FP)
	int branch_max_throughput_mps = 0;
	struct dc_link_settings cur_link_settings;
	uint32_t end_to_end_bw_in_kbps = 0;
	uint32_t root_link_bw_in_kbps = 0;
	uint32_t virtual_channel_bw_in_kbps = 0;
	struct dc_dsc_bw_range bw_range = {0};
	struct dc_dsc_config_options dsc_options = {0};
	uint32_t stream_kbps;

	/*
	 * DSC-unnecessary case:
	 * check whether the timing can be supported within the end-to-end bandwidth.
	 */
	stream_kbps =
		dc_bandwidth_in_kbps_from_timing(&stream->timing,
						 dc_link_get_highest_encoding_format(stream->link));
	cur_link_settings = stream->link->verified_link_cap;
	root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
	virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);

	/* pick the end-to-end bandwidth bottleneck */
	end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);

	if (stream_kbps <= end_to_end_bw_in_kbps) {
		DRM_DEBUG_DRIVER("MST_DSC no dsc required. End-to-end bw sufficient\n");
		return DC_OK;
	}

	/* DSC-necessary case */
	if (!aconnector->dsc_aux)
		return DC_FAIL_BANDWIDTH_VALIDATE;

	if (is_dsc_common_config_possible(stream, &bw_range)) {

		/* Capable of DSC passthrough: DSC bitstream along the entire path */
		if (aconnector->mst_output_port->passthrough_aux) {
			if (bw_range.min_kbps > end_to_end_bw_in_kbps) {
				DRM_DEBUG_DRIVER("MST_DSC dsc passthrough and decode at endpoint. "
						 "Max dsc compression bw can't fit into end-to-end bw\n");
				return DC_FAIL_BANDWIDTH_VALIDATE;
			}
		} else {
			/* DSC bitstream decoded at the last DP link */
			struct drm_dp_mst_port *immediate_upstream_port = NULL;
			uint32_t end_link_bw = 0;

			/* Get the last DP link's BW capability; the mode shall be supported by the legacy peer */
			if (aconnector->mst_output_port->pdt != DP_PEER_DEVICE_DP_LEGACY_CONV &&
			    aconnector->mst_output_port->pdt != DP_PEER_DEVICE_NONE) {
				if (aconnector->vc_full_pbn != aconnector->mst_output_port->full_pbn) {
					dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw);
					aconnector->vc_full_pbn = aconnector->mst_output_port->full_pbn;
					aconnector->mst_local_bw = end_link_bw;
				} else {
					end_link_bw = aconnector->mst_local_bw;
				}

				if (end_link_bw > 0 &&
				    stream_kbps > end_link_bw &&
				    aconnector->branch_ieee_oui != DP_BRANCH_DEVICE_ID_90CC24) {
					DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
							 "Mode required bw can't fit into last link\n");
					return DC_FAIL_BANDWIDTH_VALIDATE;
				}
			}

			/* Get the virtual channel bandwidth between the source and the link before the last link */
			if (aconnector->mst_output_port->parent->port_parent)
				immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;

			if (immediate_upstream_port) {
				virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
				virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
			} else {
				/* Topology LCT 1 case: only one mstb */
				virtual_channel_bw_in_kbps = root_link_bw_in_kbps;
			}

			if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
				DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
						 "Max dsc compression can't fit into MST available bw\n");
				return DC_FAIL_BANDWIDTH_VALIDATE;
			}
		}
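		/*
		 * A feasible DSC bandwidth range alone is not enough; the code
		 * below asks DC for a concrete DSC configuration (target bpp,
		 * slice layout) that fits the stream into the end-to-end
		 * bandwidth computed earlier.  The connector's max_dsc_bpp caps
		 * the target bpp, expressed in 1/16-bpp units (hence the
		 * "* 16"); on success dc_dsc_compute_config() fills
		 * stream->timing.dsc_cfg and the DSC timing flag is set.
		 */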
		/* Confirm that we can obtain a DSC config */
		dc_dsc_get_default_config_option(stream->link->dc, &dsc_options);
		dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16;
		if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0],
					  &stream->sink->dsc_caps.dsc_dec_caps,
					  &dsc_options,
					  end_to_end_bw_in_kbps,
					  &stream->timing,
					  dc_link_get_highest_encoding_format(stream->link),
					  &stream->timing.dsc_cfg)) {
			stream->timing.flags.DSC = 1;
			DRM_DEBUG_DRIVER("MST_DSC require dsc and dsc config found\n");
		} else {
			DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find appropriate dsc config\n");
			return DC_FAIL_BANDWIDTH_VALIDATE;
		}

		/* Check the MST DSC output bandwidth against the branch's overall throughput */
		switch (stream->timing.pixel_encoding) {
		case PIXEL_ENCODING_RGB:
		case PIXEL_ENCODING_YCBCR444:
			branch_max_throughput_mps =
				aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps;
			break;
		case PIXEL_ENCODING_YCBCR422:
		case PIXEL_ENCODING_YCBCR420:
			branch_max_throughput_mps =
				aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps;
			break;
		default:
			break;
		}

		if (branch_max_throughput_mps != 0 &&
		    ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) {
			DRM_DEBUG_DRIVER("MST_DSC require dsc but max throughput mps fails\n");
			return DC_FAIL_BANDWIDTH_VALIDATE;
		}
	} else {
		DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find common dsc config\n");
		return DC_FAIL_BANDWIDTH_VALIDATE;
	}
#endif
	return DC_OK;
}
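/*
 * Usage sketch (illustrative only; not the driver's actual call site): a
 * caller that has already built a candidate dc_stream_state for an MST
 * connector can gate mode validation on the helper above.  The function
 * name below is hypothetical and the block is intentionally compiled out.
 */
#if 0
static enum drm_mode_status
example_mst_mode_gate(struct amdgpu_dm_connector *aconnector,
		      struct dc_stream_state *stream)
{
	/* Reject the mode if neither raw bandwidth nor DSC can carry it. */
	if (dm_dp_mst_is_port_support_mode(aconnector, stream) != DC_OK)
		return MODE_BAD;

	return MODE_OK;
}
#endif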