/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/vmalloc.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_edid.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"
#include "amdgpu_dm_hdcp.h"

#include "dc.h"
#include "dm_helpers.h"

#include "ddc_service_types.h"
#include "dpcd_defs.h"

#include "dmub_cmd.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "dc/resource/dcn20/dcn20_resource.h"

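/*
 * Peak-to-average bandwidth factor (x1000) used when converting a stream
 * rate to PBN: a 0.6% margin on top of the average rate, matching the
 * 1006/1000 margin the DRM MST helpers apply in drm_dp_calc_pbn_mode().
 */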
#define PEAK_FACTOR_X1000 1006

static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	ssize_t result = 0;
	struct aux_payload payload;
	enum aux_return_code_type operation_result;
	struct amdgpu_device *adev;
	struct ddc_service *ddc;

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

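	/*
	 * Translate the DRM AUX request into a DC aux_payload. Bit 3
	 * (DP_AUX_NATIVE_WRITE/READ) is set only for native transactions,
	 * so a cleared bit means I2C-over-AUX; bit 0 (DP_AUX_I2C_READ) is
	 * set for reads in both native and I2C requests.
	 */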
	payload.address = msg->address;
	payload.data = msg->buffer;
	payload.length = msg->size;
	payload.reply = &msg->reply;
	payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
	payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
	payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
	payload.write_status_update =
			(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
	payload.defer_delay = 0;

	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
					  &operation_result);

	/*
	 * Workaround for certain Intel platforms where HPD unexpectedly pulls
	 * low during the first sideband message transaction and
	 * AUX_RET_ERROR_HPD_DISCON is returned. The AUX transaction actually
	 * succeeds in that case, so bypass the error.
	 */
	ddc = TO_DM_AUX(aux)->ddc_service;
	adev = ddc->ctx->driver_context;
	if (adev->dm.aux_hpd_discon_quirk) {
		if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
		    operation_result == AUX_RET_ERROR_HPD_DISCON) {
			result = 0;
			operation_result = AUX_RET_SUCCESS;
		}
	}

	if (payload.write && result >= 0)
		result = msg->size;

	if (result < 0)
		switch (operation_result) {
		case AUX_RET_SUCCESS:
			break;
		case AUX_RET_ERROR_HPD_DISCON:
		case AUX_RET_ERROR_UNKNOWN:
		case AUX_RET_ERROR_INVALID_OPERATION:
		case AUX_RET_ERROR_PROTOCOL_ERROR:
			result = -EIO;
			break;
		case AUX_RET_ERROR_INVALID_REPLY:
		case AUX_RET_ERROR_ENGINE_ACQUIRE:
			result = -EBUSY;
			break;
		case AUX_RET_ERROR_TIMEOUT:
			result = -ETIMEDOUT;
			break;
		}

	return result;
}

static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(connector);

	if (aconnector->dc_sink) {
		dc_link_remove_remote_sink(aconnector->dc_link,
					   aconnector->dc_sink);
		dc_sink_release(aconnector->dc_sink);
	}

	drm_edid_free(aconnector->drm_edid);

	drm_connector_cleanup(connector);
	drm_dp_mst_put_port_malloc(aconnector->mst_output_port);
	kfree(aconnector);
}

static int
amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	r = drm_dp_mst_connector_late_register(connector,
					       amdgpu_dm_connector->mst_output_port);
	if (r < 0)
		return r;

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}


static inline void
amdgpu_dm_mst_reset_mst_connector_setting(struct amdgpu_dm_connector *aconnector)
{
	aconnector->drm_edid = NULL;
	aconnector->dsc_aux = NULL;
	aconnector->mst_output_port->passthrough_aux = NULL;
	aconnector->mst_local_bw = 0;
	aconnector->vc_full_pbn = 0;
}

static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = aconnector->mst_output_port;
	struct amdgpu_dm_connector *root = aconnector->mst_root;
	struct dc_link *dc_link = aconnector->dc_link;
	struct dc_sink *dc_sink = aconnector->dc_sink;

	drm_dp_mst_connector_early_unregister(connector, port);

	/*
	 * Release the dc_sink of a connector whose attached port is no
	 * longer in the MST topology.
	 */
	drm_modeset_lock(&root->mst_mgr.base.lock, NULL);
	if (dc_sink) {
		if (dc_link->sink_count)
			dc_link_remove_remote_sink(dc_link, dc_sink);

		drm_dbg_dp(connector->dev,
			   "DM_MST: remove remote sink 0x%p, %d remaining\n",
			   dc_sink, dc_link->sink_count);

		dc_sink_release(dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
	}

	aconnector->mst_status = MST_STATUS_DEFAULT;
	drm_modeset_unlock(&root->mst_mgr.base.lock);
}

static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dm_dp_mst_connector_destroy,
	.reset = amdgpu_dm_connector_funcs_reset,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_mst_connector_late_register,
	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};

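/*
 * Some branch devices (device ID 0x90CC24, DPCD rev 1.2/1.4, with two or
 * more sinks) do not expose DSC through a per-port virtual DPCD; match
 * them here so callers can fall back to the hub's own AUX for DSC caps.
 */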
bool needs_dsc_aux_workaround(struct dc_link *link)
{
	if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
	    (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
	    link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
		return true;

	return false;
}

#if defined(CONFIG_DRM_AMD_DC_FP)
static bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
{
	u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F

	if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
		if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
		    IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
			DRM_INFO("Synaptics Cascaded MST hub\n");
			return true;
		}
	}

	return false;
}

static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *dc_sink = aconnector->dc_sink;
	struct drm_dp_mst_port *port = aconnector->mst_output_port;
	u8 dsc_caps[16] = { 0 };
	u8 dsc_branch_dec_caps_raw[3] = { 0 }; // DSC branch decoder caps 0xA0 ~ 0xA2
	u8 *dsc_branch_dec_caps = NULL;

	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);

	/*
	 * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
	 * because it only checks the DSC/FEC caps of the port itself and not
	 * of the dock.
	 *
	 * This case returns NULL: a DSC-capable MST dock connected to a
	 * non-FEC/DSC-capable display.
	 *
	 * Workaround: explicitly check for the use case above and use the
	 * MST dock's aux as dsc_aux.
	 */
	if (!aconnector->dsc_aux && !port->parent->port_parent &&
	    needs_dsc_aux_workaround(aconnector->dc_link))
		aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;

	/* synaptics cascaded MST hub case */
	if (is_synaptics_cascaded_panamera(aconnector->dc_link, port))
		aconnector->dsc_aux = port->mgr->aux;

	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux,
			     DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, dsc_branch_dec_caps_raw, 3) == 3)
		dsc_branch_dec_caps = dsc_branch_dec_caps_raw;

	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				   dsc_caps, dsc_branch_dec_caps,
				   &dc_sink->dsc_caps.dsc_dec_caps))
		return false;

	return true;
}
#endif

static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector)
{
	union dp_downstream_port_present ds_port_present;

	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) {
		DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n");
		return false;
	}

	aconnector->mst_downstream_port_present = ds_port_present;
	DRM_INFO("Downstream port present %d, type %d\n",
		 ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE);

	return true;
}

static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	int ret = 0;

	if (!aconnector)
		return drm_add_edid_modes(connector, NULL);

	if (!aconnector->drm_edid) {
		const struct drm_edid *drm_edid;

		drm_edid = drm_dp_mst_edid_read(connector,
						&aconnector->mst_root->mst_mgr,
						aconnector->mst_output_port);

		if (!drm_edid) {
			amdgpu_dm_set_mst_status(&aconnector->mst_status,
						 MST_REMOTE_EDID, false);

			drm_edid_connector_update(
				&aconnector->base,
				NULL);

			DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name);
			if (!aconnector->dc_sink) {
				struct dc_sink *dc_sink;
				struct dc_sink_init_data init_params = {
					.link = aconnector->dc_link,
					.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

				dc_sink = dc_link_add_remote_sink(
					aconnector->dc_link,
					NULL,
					0,
					&init_params);

				if (!dc_sink) {
					DRM_ERROR("Unable to add a remote sink\n");
					return 0;
				}

				drm_dbg_dp(connector->dev,
					   "DM_MST: add remote sink 0x%p, %d remaining\n",
					   dc_sink,
					   aconnector->dc_link->sink_count);

				dc_sink->priv = aconnector;
				aconnector->dc_sink = dc_sink;
			}

			return ret;
		}

		aconnector->drm_edid = drm_edid;
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 MST_REMOTE_EDID, true);
	}

	if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	if (!aconnector->dc_sink) {
		struct dc_sink *dc_sink;
		struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
		const struct edid *edid;

		edid = drm_edid_raw(aconnector->drm_edid); // FIXME: Get rid of drm_edid_raw()
		dc_sink = dc_link_add_remote_sink(
			aconnector->dc_link,
			(uint8_t *)edid,
			(edid->extensions + 1) * EDID_LENGTH,
			&init_params);

		if (!dc_sink) {
			DRM_ERROR("Unable to add a remote sink\n");
			return 0;
		}

		drm_dbg_dp(connector->dev,
			   "DM_MST: add remote sink 0x%p, %d remaining\n",
			   dc_sink, aconnector->dc_link->sink_count);

		dc_sink->priv = aconnector;
		/* dc_link_add_remote_sink returns a new reference */
		aconnector->dc_sink = dc_sink;

		/*
		 * When a display is unplugged from an MST hub, the connector
		 * will be destroyed within dm_dp_mst_connector_destroy and its
		 * HDCP properties, like type, undesired, desired, enabled,
		 * will be lost. So, save the HDCP properties into hdcp_work
		 * within amdgpu_dm_atomic_commit_tail. If the same display is
		 * plugged back with the same display index, its HDCP
		 * properties will be retrieved from hdcp_work within
		 * dm_dp_mst_get_modes.
		 */
		if (aconnector->dc_sink && connector->state) {
			struct drm_device *dev = connector->dev;
			struct amdgpu_device *adev = drm_to_adev(dev);

			if (adev->dm.hdcp_workqueue) {
				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
				struct hdcp_workqueue *hdcp_w =
					&hdcp_work[aconnector->dc_link->link_index];

				connector->state->hdcp_content_type =
					hdcp_w->hdcp_content_type[connector->index];
				connector->state->content_protection =
					hdcp_w->content_protection[connector->index];
			}
		}

		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(
				connector, aconnector->drm_edid);

#if defined(CONFIG_DRM_AMD_DC_FP)
			if (!validate_dsc_caps_on_connector(aconnector))
				memset(&aconnector->dc_sink->dsc_caps,
				       0, sizeof(aconnector->dc_sink->dsc_caps));
#endif

			if (!retrieve_downstream_port_device(aconnector))
				memset(&aconnector->mst_downstream_port_present,
				       0, sizeof(aconnector->mst_downstream_port_present));
		}
	}

	drm_edid_connector_update(&aconnector->base, aconnector->drm_edid);

	ret = drm_edid_connector_add_modes(connector);

	return ret;
}

static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
			   struct drm_atomic_state *state)
{
	struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
											 connector);
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);

	return &adev->dm.mst_encoders[acrtc->crtc_id].base;
}

static int
dm_dp_mst_detect(struct drm_connector *connector,
		 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_dm_connector *master = aconnector->mst_root;
	struct drm_dp_mst_port *port = aconnector->mst_output_port;
	int connection_status;

	if (drm_connector_is_unregistered(connector))
		return connector_status_disconnected;

	connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
						   aconnector->mst_output_port);

	if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) {
		uint8_t dpcd_rev;
		int ret;

		ret = drm_dp_dpcd_readb(&port->aux, DP_DP13_DPCD_REV, &dpcd_rev);

		if (ret == 1) {
			port->dpcd_rev = dpcd_rev;

			/* Could be DP1.2 DP Rx case */
			if (!dpcd_rev) {
				ret = drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &dpcd_rev);

				if (ret == 1)
					port->dpcd_rev = dpcd_rev;
			}

			if (!dpcd_rev)
				DRM_DEBUG_KMS("Can't decide DPCD revision number!");
		}

		/*
		 * Could be a legacy sink or a logical port on DP 1.2; a
		 * remote DPCD read will be NAKed in those cases.
		 */
		if (ret != 1)
			DRM_DEBUG_KMS("Can't access DPCD");
	} else if (port->pdt == DP_PEER_DEVICE_NONE) {
		port->dpcd_rev = 0;
	}

	/*
	 * Release the dc_sink of a connector whose unplug event was notified
	 * via a CSN (Connection Status Notify) message.
	 */
	if (connection_status == connector_status_disconnected && aconnector->dc_sink) {
		if (aconnector->dc_link->sink_count)
			dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);

		drm_dbg_dp(connector->dev,
			   "DM_MST: remove remote sink 0x%p, %d remaining\n",
			   aconnector->dc_link,
			   aconnector->dc_link->sink_count);

		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_mst_reset_mst_connector_setting(aconnector);

		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
					 false);
	}

	return connection_status;
}

static int dm_dp_mst_atomic_check(struct drm_connector *connector,
				  struct drm_atomic_state *state)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_root->mst_mgr;
	struct drm_dp_mst_port *mst_port = aconnector->mst_output_port;

	return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port);
}

static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
	.get_modes = dm_dp_mst_get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_best_encoder = dm_mst_atomic_best_encoder,
	.detect_ctx = dm_dp_mst_detect,
	.atomic_check = dm_dp_mst_atomic_check,
};

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i];
		struct drm_encoder *encoder = &amdgpu_encoder->base;

		encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

		drm_encoder_init(
			dev,
			&amdgpu_encoder->base,
			&amdgpu_dm_encoder_funcs,
			DRM_MODE_ENCODER_DPMST,
			NULL);

		drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
	}
}

static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port,
			const char *pathprop)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	int i;

	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
	if (!aconnector)
		return NULL;

	DRM_DEBUG_DRIVER("%s: Create aconnector 0x%p for port 0x%p\n", __func__, aconnector, port);

	connector = &aconnector->base;
	aconnector->mst_output_port = port;
	aconnector->mst_root = master;
	amdgpu_dm_set_mst_status(&aconnector->mst_status,
				 MST_PROBE, true);

	if (drm_connector_dynamic_init(
		dev,
		connector,
		&dm_dp_mst_connector_funcs,
		DRM_MODE_CONNECTOR_DisplayPort,
		NULL)) {
		kfree(aconnector);
		return NULL;
	}
	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		&adev->dm,
		aconnector,
		DRM_MODE_CONNECTOR_DisplayPort,
		master->dc_link,
		master->connector_id);

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_connector_attach_encoder(&aconnector->base,
					     &adev->dm.mst_encoders[i].base);
	}

	connector->max_bpc_property = master->base.max_bpc_property;
	if (connector->max_bpc_property)
		drm_connector_attach_max_bpc_property(connector, 8, 16);

	connector->vrr_capable_property = master->base.vrr_capable_property;
	if (connector->vrr_capable_property)
		drm_connector_attach_vrr_capable_property(connector);

	drm_object_attach_property(
		&connector->base,
		dev->mode_config.path_property,
		0);
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.tile_property,
		0);
	connector->colorspace_property = master->base.colorspace_property;
	if (connector->colorspace_property)
		drm_connector_attach_colorspace_property(connector);

	drm_connector_set_path_property(connector, pathprop);

	/*
	 * Initialize the connector state before adding the connector to the
	 * drm and framebuffer lists
	 */
	amdgpu_dm_connector_funcs_reset(connector);

	drm_dp_mst_get_port_malloc(port);

	return connector;
}

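/*
 * Poll and acknowledge sideband-message-ready interrupts: read the (E)SI
 * bytes, mask them down to the requested DOWN_REP/UP_REQ events, let the
 * DRM MST helper process the messages, ack the handled bits back to the
 * sink, and repeat until no ready bit remains (bounded by
 * max_process_count).
 */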
void dm_handle_mst_sideband_msg_ready_event(
	struct drm_dp_mst_topology_mgr *mgr,
	enum mst_msg_ready_type msg_rdy_type)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	uint8_t dpcd_bytes_to_read;
	const uint8_t max_process_count = 30;
	uint8_t process_count = 0;
	u8 retry;
	struct amdgpu_dm_connector *aconnector =
		container_of(mgr, struct amdgpu_dm_connector, mst_mgr);


	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	mutex_lock(&aconnector->handle_mst_msg_ready);

	while (process_count < max_process_count) {
		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};

		process_count++;

		dret = drm_dp_dpcd_read(
			&aconnector->dm_dp_aux.aux,
			dpcd_addr,
			esi,
			dpcd_bytes_to_read);

		if (dret != dpcd_bytes_to_read) {
			DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
			break;
		}

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);

		switch (msg_rdy_type) {
		case DOWN_REP_MSG_RDY_EVENT:
			/* Only handle DOWN_REP_MSG_RDY case */
			esi[1] &= DP_DOWN_REP_MSG_RDY;
			break;
		case UP_REQ_MSG_RDY_EVENT:
			/* Only handle UP_REQ_MSG_RDY case */
			esi[1] &= DP_UP_REQ_MSG_RDY;
			break;
		default:
			/* Handle both cases */
			esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
			break;
		}

		if (!esi[1])
			break;

		/* handle MST irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
							esi,
							ack,
							&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			for (retry = 0; retry < 3; retry++) {
				ssize_t wret;

				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
							  dpcd_addr + 1,
							  ack[1]);
				if (wret == 1)
					break;
			}

			if (retry == 3) {
				DRM_ERROR("Failed to ack MST event.\n");
				break;
			}

			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	mutex_unlock(&aconnector->handle_mst_msg_ready);

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
{
	dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
}

static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
	.add_connector = dm_dp_add_mst_connector,
	.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
};

void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
				       struct amdgpu_dm_connector *aconnector,
				       int link_index)
{
	struct dc_link_settings max_link_enc_cap = {0};

	aconnector->dm_dp_aux.aux.name =
		kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
			  link_index);
	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
	aconnector->dm_dp_aux.aux.drm_dev = dm->ddev;
	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

	drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
	drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
				      &aconnector->base);

	if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		return;

	dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
	aconnector->mst_mgr.cbs = &dm_mst_cbs;
	drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr, adev_to_drm(dm->adev),
				     &aconnector->dm_dp_aux.aux, 16, 4, aconnector->connector_id);

	drm_connector_attach_dp_subconnector_property(&aconnector->base);
}

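/*
 * PBN available per MTP time slot: the link bandwidth in kbps divided by
 * 8 * 1000 gives MBps, and with 64 slots per MTP and one PBN defined as
 * 54/64 MBps, the per-slot PBN works out to kbps / (54 * 8 * 1000).
 */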
int dm_mst_get_pbn_divider(struct dc_link *link)
{
	if (!link)
		return 0;

	return dc_link_bandwidth_kbps(link,
				      dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}

struct dsc_mst_fairness_params {
	struct dc_crtc_timing *timing;
	struct dc_sink *sink;
	struct dc_dsc_bw_range bw_range;
	bool compression_possible;
	struct drm_dp_mst_port *port;
	enum dsc_clock_force_state clock_force_enable;
	uint32_t num_slices_h;
	uint32_t num_slices_v;
	uint32_t bpp_overwrite;
	struct amdgpu_dm_connector *aconnector;
};

#if defined(CONFIG_DRM_AMD_DC_FP)
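/*
 * Pick the FEC overhead multiplier (x1000) for the PBN conversion based on
 * the negotiated channel coding: 8b/10b links carry FEC as extra symbols on
 * the main link, while 128b/132b has FEC built into the channel coding.
 */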
static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
{
	u8 link_coding_cap;
	uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;

	link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
	if (link_coding_cap == DP_128b_132b_ENCODING)
		fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;

	return fec_overhead_multiplier_x1000;
}

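/*
 * Convert a stream rate in kbps to a peak PBN value:
 *
 *   peak_kbps = kbps * 1.006 * (fec_overhead_multiplier_x1000 / 1000)
 *   PBN       = ceil(peak_kbps * 64 / (54 * 8 * 1000))
 *
 * i.e. the margined rate expressed in PBN units of 54/64 MBps.
 */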
static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
{
	u64 peak_kbps = kbps;

	peak_kbps *= 1006;
	peak_kbps *= fec_overhead_multiplier_x1000;
	peak_kbps = div_u64(peak_kbps, 1000 * 1000);
	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}

static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
					       struct dsc_mst_fairness_vars *vars,
					       int count,
					       int k)
{
	struct drm_connector *drm_connector;
	int i;
	struct dc_dsc_config_options dsc_options = {0};

	for (i = 0; i < count; i++) {
		drm_connector = &params[i].aconnector->base;

		dc_dsc_get_default_config_option(params[i].sink->ctx->dc, &dsc_options);
		dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;

		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
		if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
					params[i].sink->ctx->dc->res_pool->dscs[0],
					&params[i].sink->dsc_caps.dsc_dec_caps,
					&dsc_options,
					0,
					params[i].timing,
					dc_link_get_highest_encoding_format(params[i].aconnector->dc_link),
					&params[i].timing->dsc_cfg)) {
			params[i].timing->flags.DSC = 1;

			if (params[i].bpp_overwrite)
				params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite;
			else
				params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16;

			if (params[i].num_slices_h)
				params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;

			if (params[i].num_slices_v)
				params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v;
		} else {
			params[i].timing->flags.DSC = 0;
		}
		params[i].timing->dsc_cfg.mst_pbn = vars[i + k].pbn;
	}

	for (i = 0; i < count; i++) {
		if (params[i].sink) {
			if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
			    params[i].sink->sink_signal != SIGNAL_TYPE_NONE)
				DRM_DEBUG_DRIVER("MST_DSC %s i=%d dispname=%s\n", __func__, i,
						 params[i].sink->edid_caps.display_name);
		}

		DRM_DEBUG_DRIVER("MST_DSC dsc=%d bits_per_pixel=%d pbn=%d\n",
				 params[i].timing->flags.DSC,
				 params[i].timing->dsc_cfg.bits_per_pixel,
				 vars[i + k].pbn);
	}
}

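/*
 * Back-compute the DSC target bpp (x16) that fits within a given PBN
 * allocation; the 994/1000 factor divides the 1.006 peak margin back out
 * of the PBN-to-kbps conversion (994 * 1.006 is approximately 1000).
 */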
static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
{
	struct dc_dsc_config dsc_config;
	u64 kbps;

	struct drm_connector *drm_connector = &param.aconnector->base;
	struct dc_dsc_config_options dsc_options = {0};

	dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
	dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;

	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
	dc_dsc_compute_config(
		param.sink->ctx->dc->res_pool->dscs[0],
		&param.sink->dsc_caps.dsc_dec_caps,
		&dsc_options,
		(int) kbps, param.timing,
		dc_link_get_highest_encoding_format(param.aconnector->dc_link),
		&dsc_config);

	return dsc_config.bits_per_pixel;
}

static int increase_dsc_bpp(struct drm_atomic_state *state,
			    struct drm_dp_mst_topology_state *mst_state,
			    struct dc_link *dc_link,
			    struct dsc_mst_fairness_params *params,
			    struct dsc_mst_fairness_vars *vars,
			    int count,
			    int k)
{
	int i;
	bool bpp_increased[MAX_PIPES];
	int initial_slack[MAX_PIPES];
	int min_initial_slack;
	int next_index;
	int remaining_to_increase = 0;
	int link_timeslots_used;
	int fair_pbn_alloc;
	int ret = 0;
	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);

	for (i = 0; i < count; i++) {
		if (vars[i + k].dsc_enabled) {
			initial_slack[i] =
				kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
			bpp_increased[i] = false;
			remaining_to_increase += 1;
		} else {
			initial_slack[i] = 0;
			bpp_increased[i] = true;
		}
	}

	while (remaining_to_increase) {
		next_index = -1;
		min_initial_slack = -1;
		for (i = 0; i < count; i++) {
			if (!bpp_increased[i]) {
				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
					min_initial_slack = initial_slack[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		link_timeslots_used = 0;

		for (i = 0; i < count; i++)
			link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, dfixed_trunc(mst_state->pbn_div));

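		/*
		 * Split the remaining free time slots evenly between the
		 * streams still waiting for a bpp increase, converted back
		 * to PBN (63 usable slots, since one of the 64 MTP slots
		 * carries the MTP header).
		 */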
		fair_pbn_alloc =
			(63 - link_timeslots_used) / remaining_to_increase * dfixed_trunc(mst_state->pbn_div);

		if (initial_slack[next_index] > fair_pbn_alloc) {
			vars[next_index].pbn += fair_pbn_alloc;
			ret = drm_dp_atomic_find_time_slots(state,
							    params[next_index].port->mgr,
							    params[next_index].port,
							    vars[next_index].pbn);
			if (ret < 0)
				return ret;

			ret = drm_dp_mst_atomic_check(state);
			if (ret == 0) {
				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
			} else {
				vars[next_index].pbn -= fair_pbn_alloc;
				ret = drm_dp_atomic_find_time_slots(state,
								    params[next_index].port->mgr,
								    params[next_index].port,
								    vars[next_index].pbn);
				if (ret < 0)
					return ret;
			}
		} else {
			vars[next_index].pbn += initial_slack[next_index];
			ret = drm_dp_atomic_find_time_slots(state,
							    params[next_index].port->mgr,
							    params[next_index].port,
							    vars[next_index].pbn);
			if (ret < 0)
				return ret;

			ret = drm_dp_mst_atomic_check(state);
			if (ret == 0) {
				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
			} else {
				vars[next_index].pbn -= initial_slack[next_index];
				ret = drm_dp_atomic_find_time_slots(state,
								    params[next_index].port->mgr,
								    params[next_index].port,
								    vars[next_index].pbn);
				if (ret < 0)
					return ret;
			}
		}

		bpp_increased[next_index] = true;
		remaining_to_increase--;
	}
	return 0;
}

static int try_disable_dsc(struct drm_atomic_state *state,
			   struct dc_link *dc_link,
			   struct dsc_mst_fairness_params *params,
			   struct dsc_mst_fairness_vars *vars,
			   int count,
			   int k)
{
	int i;
	bool tried[MAX_PIPES];
	int kbps_increase[MAX_PIPES];
	int max_kbps_increase;
	int next_index;
	int remaining_to_try = 0;
	int ret;
	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
	int var_pbn;

	for (i = 0; i < count; i++) {
		if (vars[i + k].dsc_enabled
		    && vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16
		    && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
			tried[i] = false;
			remaining_to_try += 1;
		} else {
			kbps_increase[i] = 0;
			tried[i] = true;
		}
	}

	while (remaining_to_try) {
		next_index = -1;
		max_kbps_increase = -1;
		for (i = 0; i < count; i++) {
			if (!tried[i]) {
				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
					max_kbps_increase = kbps_increase[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
		var_pbn = vars[next_index].pbn;
		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
		ret = drm_dp_atomic_find_time_slots(state,
						    params[next_index].port->mgr,
						    params[next_index].port,
						    vars[next_index].pbn);
		if (ret < 0) {
			DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n",
					 __func__, __LINE__, next_index, ret);
			vars[next_index].pbn = var_pbn;
			return ret;
		}

		ret = drm_dp_mst_atomic_check(state);
		if (ret == 0) {
			DRM_DEBUG_DRIVER("MST_DSC index #%d, greedily disable dsc\n", next_index);
			vars[next_index].dsc_enabled = false;
			vars[next_index].bpp_x16 = 0;
		} else {
			DRM_DEBUG_DRIVER("MST_DSC index #%d, restore optimized pbn value\n", next_index);
			vars[next_index].pbn = var_pbn;
			ret = drm_dp_atomic_find_time_slots(state,
							    params[next_index].port->mgr,
							    params[next_index].port,
							    vars[next_index].pbn);
			if (ret < 0) {
				DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n",
						 __func__, __LINE__, next_index, ret);
				return ret;
			}
		}

		tried[next_index] = true;
		remaining_to_try--;
	}
	return 0;
}

static void log_dsc_params(int count, struct dsc_mst_fairness_vars *vars, int k)
{
	int i;

	for (i = 0; i < count; i++)
		DRM_DEBUG_DRIVER("MST_DSC DSC params: stream #%d --- dsc_enabled = %d, bpp_x16 = %d, pbn = %d\n",
				 i, vars[i + k].dsc_enabled, vars[i + k].bpp_x16, vars[i + k].pbn);
}

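/*
 * Distribute the link bandwidth fairly between the MST streams sharing
 * this PHY: first try every stream uncompressed, then fall back to maximum
 * DSC compression, then grow each stream's bpp toward its maximum
 * (increase_dsc_bpp) and finally switch individual streams back to
 * uncompressed where the remaining slack allows (try_disable_dsc).
 */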
static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
					    struct dc_state *dc_state,
					    struct dc_link *dc_link,
					    struct dsc_mst_fairness_vars *vars,
					    struct drm_dp_mst_topology_mgr *mgr,
					    int *link_vars_start_index)
{
	struct dc_stream_state *stream;
	struct dsc_mst_fairness_params params[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr);
	int count = 0;
	int i, k, ret;
	bool debugfs_overwrite = false;
	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
	struct drm_connector_state *new_conn_state;

	memset(params, 0, sizeof(params));

	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	/* Set up params */
	DRM_DEBUG_DRIVER("%s: MST_DSC Try to set up params from %d streams\n", __func__, dc_state->stream_count);
	for (i = 0; i < dc_state->stream_count; i++) {
		struct dc_dsc_policy dsc_policy = {0};

		stream = dc_state->streams[i];

		if (stream->link != dc_link)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		if (!aconnector)
			continue;

		if (!aconnector->mst_output_port)
			continue;

		new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);

		if (!new_conn_state) {
			DRM_DEBUG_DRIVER("%s:%d MST_DSC Skip the stream 0x%p with invalid new_conn_state\n",
					 __func__, __LINE__, stream);
			continue;
		}

		stream->timing.flags.DSC = 0;

		params[count].timing = &stream->timing;
		params[count].sink = stream->sink;
		params[count].aconnector = aconnector;
		params[count].port = aconnector->mst_output_port;
		params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
		if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
			debugfs_overwrite = true;
		params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
		params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
		params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
		params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
		dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
		if (!dc_dsc_compute_bandwidth_range(
				stream->sink->ctx->dc->res_pool->dscs[0],
				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				dsc_policy.min_target_bpp * 16,
				dsc_policy.max_target_bpp * 16,
				&stream->sink->dsc_caps.dsc_dec_caps,
				&stream->timing,
				dc_link_get_highest_encoding_format(dc_link),
				&params[count].bw_range))
			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
											      dc_link_get_highest_encoding_format(dc_link));

		DRM_DEBUG_DRIVER("MST_DSC #%d stream 0x%p - max_kbps = %u, min_kbps = %u, uncompressed_kbps = %u\n",
				 count, stream, params[count].bw_range.max_kbps, params[count].bw_range.min_kbps,
				 params[count].bw_range.stream_kbps);
		count++;
	}

	DRM_DEBUG_DRIVER("%s: MST_DSC Params set up for %d streams\n", __func__, count);

	if (count == 0) {
		ASSERT(0);
		return 0;
	}

	/* k is start index of vars for current phy link used by mst hub */
	k = *link_vars_start_index;
	/* set vars start index for next mst hub phy link */
	*link_vars_start_index += count;

	/* Try no compression */
	DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
	for (i = 0; i < count; i++) {
		vars[i + k].aconnector = params[i].aconnector;
		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
		vars[i + k].dsc_enabled = false;
		vars[i + k].bpp_x16 = 0;
		ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
						    vars[i + k].pbn);
		if (ret < 0)
			return ret;
	}
	ret = drm_dp_mst_atomic_check(state);
	if (ret == 0 && !debugfs_overwrite) {
		set_dsc_configs_from_fairness_vars(params, vars, count, k);
		return 0;
	} else if (ret != -ENOSPC) {
		return ret;
	}

	log_dsc_params(count, vars, k);

	/* Try max compression */
	DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
	for (i = 0; i < count; i++) {
		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
			vars[i + k].dsc_enabled = true;
			vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
							    params[i].port, vars[i + k].pbn);
			if (ret < 0)
				return ret;
		} else {
			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
			vars[i + k].dsc_enabled = false;
			vars[i + k].bpp_x16 = 0;
			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
							    params[i].port, vars[i + k].pbn);
			if (ret < 0)
				return ret;
		}
	}
	ret = drm_dp_mst_atomic_check(state);
	if (ret != 0)
		return ret;

	log_dsc_params(count, vars, k);

	/* Optimize degree of compression */
	DRM_DEBUG_DRIVER("MST_DSC Try optimize compression\n");
	ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k);
	if (ret < 0) {
		DRM_DEBUG_DRIVER("MST_DSC Failed to optimize compression\n");
		return ret;
	}

	log_dsc_params(count, vars, k);

	DRM_DEBUG_DRIVER("MST_DSC Try disable compression\n");
	ret = try_disable_dsc(state, dc_link, params, vars, count, k);
	if (ret < 0) {
		DRM_DEBUG_DRIVER("MST_DSC Failed to disable compression\n");
		return ret;
	}

	log_dsc_params(count, vars, k);

	set_dsc_configs_from_fairness_vars(params, vars, count, k);

	return 0;
}

static bool is_dsc_need_re_compute(
	struct drm_atomic_state *state,
	struct dc_state *dc_state,
	struct dc_link *dc_link)
{
	int i, j;
	bool is_dsc_need_re_compute = false;
	struct amdgpu_dm_connector *stream_on_link[MAX_PIPES];
	int new_stream_on_link_num = 0;
	struct amdgpu_dm_connector *aconnector;
	struct dc_stream_state *stream;
	const struct dc *dc = dc_link->dc;

	/* only check phy used by dsc mst branch */
	if (dc_link->type != dc_connection_mst_branch)
		goto out;

	/* add a check for older MST DSC with no virtual DPCDs */
	if (needs_dsc_aux_workaround(dc_link) &&
	    (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
	       dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)))
		goto out;

	for (i = 0; i < MAX_PIPES; i++)
		stream_on_link[i] = NULL;

	DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in new dc_state\n", __func__, dc_state->stream_count);

	/* check if there is a mode change in the new request */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct drm_crtc_state *new_crtc_state;
		struct drm_connector_state *new_conn_state;

		stream = dc_state->streams[i];
		if (!stream)
			continue;

		DRM_DEBUG_DRIVER("%s:%d MST_DSC checking #%d stream 0x%p\n", __func__, __LINE__, i, stream);

		/* check if the stream uses the same mst link */
		if (stream->link != dc_link)
			continue;

		aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
		if (!aconnector)
			continue;

		stream_on_link[new_stream_on_link_num] = aconnector;
		new_stream_on_link_num++;

		new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
		if (!new_conn_state) {
			DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_conn_state for stream 0x%p, aconnector 0x%p\n",
					 __func__, __LINE__, stream, aconnector);
			continue;
		}

		if (IS_ERR(new_conn_state))
			continue;

		if (!new_conn_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
		if (!new_crtc_state) {
			DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_crtc_state for crtc of stream 0x%p, aconnector 0x%p\n",
					 __func__, __LINE__, stream, aconnector);
			continue;
		}

		if (IS_ERR(new_crtc_state))
			continue;

		if (new_crtc_state->enable && new_crtc_state->active) {
			if (new_crtc_state->mode_changed || new_crtc_state->active_changed ||
			    new_crtc_state->connectors_changed) {
				DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required. "
						 "stream 0x%p in new dc_state\n",
						 __func__, __LINE__, stream);
				is_dsc_need_re_compute = true;
				goto out;
			}
		}
	}

	if (new_stream_on_link_num == 0) {
		DRM_DEBUG_DRIVER("%s:%d MST_DSC no mode change request for streams in new dc_state\n",
				 __func__, __LINE__);
		is_dsc_need_re_compute = false;
		goto out;
	}

	DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in current dc_state\n",
			 __func__, dc->current_state->stream_count);

	/*
	 * Check current_state for streams on this link that are not present
	 * in the new request state.
	 */
	for (i = 0; i < dc->current_state->stream_count; i++) {
		stream = dc->current_state->streams[i];
		/* only check stream on the mst hub */
		if (stream->link != dc_link)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		if (!aconnector)
			continue;

		for (j = 0; j < new_stream_on_link_num; j++) {
			if (stream_on_link[j]) {
				if (aconnector == stream_on_link[j])
					break;
			}
		}

		if (j == new_stream_on_link_num) {
			/* not in new state */
			DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required. "
					 "stream 0x%p in current dc_state but not in new dc_state\n",
					 __func__, __LINE__, stream);
			is_dsc_need_re_compute = true;
			break;
		}
	}

out:
	DRM_DEBUG_DRIVER("%s: MST_DSC dsc recompute %s\n",
			 __func__, is_dsc_need_re_compute ? "required" : "not required");

	return is_dsc_need_re_compute;
}

int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				      struct dc_state *dc_state,
				      struct dsc_mst_fairness_vars *vars)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct resource_pool *res_pool;
	int link_vars_start_index = 0;
	int ret = 0;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];
		res_pool = stream->ctx->dc->res_pool;

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		DRM_DEBUG_DRIVER("%s: MST_DSC compute mst dsc configs for stream 0x%p, aconnector 0x%p\n",
				 __func__, stream, aconnector);

		if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
			continue;

		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		if (res_pool->funcs->remove_stream_from_ctx &&
		    res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
			return -EINVAL;

		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
			continue;

		mst_mgr = aconnector->mst_output_port->mgr;
		ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
						       &link_vars_start_index);
		if (ret != 0)
			return ret;

		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->timing.flags.DSC == 1)
			if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) {
				DRM_DEBUG_DRIVER("%s:%d MST_DSC Failed to request dsc hw resource for stream 0x%p\n",
						 __func__, __LINE__, stream);
				return -EINVAL;
			}
	}

	return ret;
}

static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
						 struct dc_state *dc_state,
						 struct dsc_mst_fairness_vars *vars)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	int link_vars_start_index = 0;
	int ret = 0;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		DRM_DEBUG_DRIVER("MST_DSC pre compute mst dsc configs for #%d stream 0x%p, aconnector 0x%p\n",
				 i, stream, aconnector);

		if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
			continue;

		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
			continue;

		mst_mgr = aconnector->mst_output_port->mgr;
		ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
						       &link_vars_start_index);
		if (ret != 0)
			return ret;

		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	return ret;
}

static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state,
					      struct dc_stream_state *stream)
{
	int i;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_state, *old_state;

	for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, i) {
		struct dm_crtc_state *dm_state = to_dm_crtc_state(new_state);

		if (dm_state->stream == stream)
			return i;
	}
	return -1;
}

static bool is_link_to_dschub(struct dc_link *dc_link)
{
	union dpcd_dsc_basic_capabilities *dsc_caps =
		&dc_link->dpcd_caps.dsc_caps.dsc_basic_caps;

	/* only check phy used by dsc mst branch */
	if (dc_link->type != dc_connection_mst_branch)
		return false;

	if (!(dsc_caps->fields.dsc_support.DSC_SUPPORT ||
	      dsc_caps->fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
		return false;
	return true;
}

static bool is_dsc_precompute_needed(struct drm_atomic_state *state)
{
	int i;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	bool ret = false;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!amdgpu_dm_find_first_crtc_matching_connector(state, crtc)) {
			ret = false;
			break;
		}
		if (dm_crtc_state->stream && dm_crtc_state->stream->link)
			if (is_link_to_dschub(dm_crtc_state->stream->link))
				ret = true;
	}
	return ret;
}

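/*
 * Pre-validate DSC ahead of the full atomic check: copy the current
 * dc_state, recreate the streams affected by this commit on the copy, run
 * the MST DSC computation there, and clear mode_changed for CRTCs whose
 * timing comes out unchanged.
 */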
int pre_validate_dsc(struct drm_atomic_state *state,
		     struct dm_atomic_state **dm_state_ptr,
		     struct dsc_mst_fairness_vars *vars)
{
	int i;
	struct dm_atomic_state *dm_state;
	struct dc_state *local_dc_state = NULL;
	int ret = 0;

	if (!is_dsc_precompute_needed(state)) {
		DRM_INFO_ONCE("%s:%d MST_DSC dsc precompute is not needed\n", __func__, __LINE__);
		return 0;
	}
	ret = dm_atomic_get_state(state, dm_state_ptr);
	if (ret != 0) {
		DRM_INFO_ONCE("%s:%d MST_DSC dm_atomic_get_state() failed\n", __func__, __LINE__);
		return ret;
	}
	dm_state = *dm_state_ptr;

	/*
	 * Create a local variable for the dc_state and copy the stream
	 * contents of dm_state->context into it. Make sure the stream
	 * pointers of the local variable are not the same as the streams
	 * from dm_state->context.
	 */

	local_dc_state = vmalloc(sizeof(struct dc_state));
	if (!local_dc_state)
		return -ENOMEM;
	memcpy(local_dc_state, dm_state->context, sizeof(struct dc_state));

	for (i = 0; i < local_dc_state->stream_count; i++) {
		struct dc_stream_state *stream = dm_state->context->streams[i];
		int ind = find_crtc_index_in_state_by_stream(state, stream);

		if (ind >= 0) {
			struct drm_connector *connector;
			struct amdgpu_dm_connector *aconnector;
			struct drm_connector_state *drm_new_conn_state;
			struct dm_connector_state *dm_new_conn_state;
			struct dm_crtc_state *dm_old_crtc_state;

			connector =
				amdgpu_dm_find_first_crtc_matching_connector(state,
									     state->crtcs[ind].ptr);
			aconnector = to_amdgpu_dm_connector(connector);
			drm_new_conn_state =
				drm_atomic_get_new_connector_state(state,
								   &aconnector->base);
			dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
			dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);

			local_dc_state->streams[i] =
				create_validate_stream_for_sink(aconnector,
								&state->crtcs[ind].new_state->mode,
								dm_new_conn_state,
								dm_old_crtc_state->stream);
			if (local_dc_state->streams[i] == NULL) {
				ret = -EINVAL;
				break;
			}
		}
	}

	if (ret != 0)
		goto clean_exit;

	ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
	if (ret != 0) {
		DRM_INFO_ONCE("%s:%d MST_DSC dsc pre_compute_mst_dsc_configs_for_state() failed\n",
			      __func__, __LINE__);
		ret = -EINVAL;
		goto clean_exit;
	}

	/*
	 * Compare the timing of local_dc_state->streams with
	 * dm_state->context; if they match, set crtc_state->mode_changed = 0.
	 */
1670 for (i = 0; i < local_dc_state->stream_count; i++) {
1671 struct dc_stream_state *stream = dm_state->context->streams[i];
1672
1673 if (local_dc_state->streams[i] &&
1674 dc_is_timing_changed(stream, local_dc_state->streams[i])) {
1675 DRM_INFO_ONCE("%s:%d MST_DSC crtc[%d] needs mode_change\n", __func__, __LINE__, i);
1676 } else {
1677 int ind = find_crtc_index_in_state_by_stream(state, stream);
1678
1679 if (ind >= 0) {
1680 DRM_INFO_ONCE("%s:%d MST_DSC no mode change for stream 0x%p\n",
1681 __func__, __LINE__, stream);
1682 state->crtcs[ind].new_state->mode_changed = 0;
1683 }
1684 }
1685 }
1686 clean_exit:
1687 for (i = 0; i < local_dc_state->stream_count; i++) {
1688 struct dc_stream_state *stream = dm_state->context->streams[i];
1689
1690 if (local_dc_state->streams[i] != stream)
1691 dc_stream_release(local_dc_state->streams[i]);
1692 }
1693
1694 vfree(local_dc_state);
1695
1696 return ret;
1697 }
1698
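/*
 * A PBN unit is defined by DP MST as 54/64 MBps, with a 1.006 peak factor
 * margin (PEAK_FACTOR_X1000) already folded in. Reversing that gives, as a
 * sketch:
 *   kbps = pbn * (1000000 / 1006) * 8 * 54 / 64
 * e.g. 1000 PBN -> 1000 * 994 * 8 * 54 / 64 = 6709500 kbps (~6.7 Gbps).
 */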
1699 static uint32_t kbps_from_pbn(unsigned int pbn)
1700 {
1701 uint64_t kbps = (uint64_t)pbn;
1702
1703 kbps *= (1000000 / PEAK_FACTOR_X1000);
1704 kbps *= 8;
1705 kbps *= 54;
1706 kbps /= 64;
1707
1708 return (uint32_t)kbps;
1709 }
1710
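/*
 * Derive the DSC bandwidth range (min/max compressed target bpp and the
 * corresponding kbps) for this timing from the DSC policy and the sink's
 * decoder caps; a non-zero bpp range on both ends means a common DSC config
 * is possible.
 */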
1711 static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
1712 struct dc_dsc_bw_range *bw_range)
1713 {
1714 struct dc_dsc_policy dsc_policy = {0};
1715
1716 dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
1717 dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
1718 stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
1719 dsc_policy.min_target_bpp * 16,
1720 dsc_policy.max_target_bpp * 16,
1721 &stream->sink->dsc_caps.dsc_dec_caps,
1722 &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
1723
1724 return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
1725 }
1726 #endif
1727
1728 #if defined(CONFIG_DRM_AMD_DC_FP)
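/*
 * Read back the currently trained link configuration (channel coding, lane
 * count, link rate) from DPCD and convert it into an effective payload
 * bandwidth in kbps, accounting for channel-coding and FEC efficiency.
 */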
1729 static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_link_bw)
1730 {
1731 uint32_t total_data_bw_efficiency_x10000 = 0;
1732 uint32_t link_rate_per_lane_kbps = 0;
1733 enum dc_link_rate link_rate;
1734 union lane_count_set lane_count;
1735 u8 dp_link_encoding;
1736 u8 link_bw_set = 0;
1737
1738 *cur_link_bw = 0;
1739
1740 if (drm_dp_dpcd_read(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &dp_link_encoding, 1) != 1 ||
1741 drm_dp_dpcd_read(aux, DP_LANE_COUNT_SET, &lane_count.raw, 1) != 1 ||
1742 drm_dp_dpcd_read(aux, DP_LINK_BW_SET, &link_bw_set, 1) != 1)
1743 return false;
1744
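/*
 * Fold the channel-coding (and, for 8b/10b, FEC) overhead into a x10000
 * fixed-point efficiency factor applied to the raw per-lane rate below.
 */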
1745 switch (dp_link_encoding) {
1746 case DP_8b_10b_ENCODING:
1747 link_rate = link_bw_set;
1748 link_rate_per_lane_kbps = link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
1749 total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
1750 total_data_bw_efficiency_x10000 /= 100;
1751 total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
1752 break;
1753 case DP_128b_132b_ENCODING:
1754 switch (link_bw_set) {
1755 case DP_LINK_BW_10:
1756 link_rate = LINK_RATE_UHBR10;
1757 break;
1758 case DP_LINK_BW_13_5:
1759 link_rate = LINK_RATE_UHBR13_5;
1760 break;
1761 case DP_LINK_BW_20:
1762 link_rate = LINK_RATE_UHBR20;
1763 break;
1764 default:
1765 return false;
1766 }
1767
1768 link_rate_per_lane_kbps = link_rate * 10000;
1769 total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000;
1770 break;
1771 default:
1772 return false;
1773 }
1774
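/* Effective link bandwidth = per-lane rate * lane count * efficiency / 10000 */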
1775 *cur_link_bw = link_rate_per_lane_kbps * lane_count.bits.LANE_COUNT_SET / 10000 * total_data_bw_efficiency_x10000;
1776 return true;
1777 }
1778 #endif
1779
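/*
 * Validate that a mode fits through the MST topology. Roughly:
 * 1. If the uncompressed stream fits within the end-to-end bottleneck
 *    (root link vs. virtual channel bandwidth), no DSC is needed.
 * 2. Otherwise DSC is required: check the compressed bandwidth range
 *    against either the full path (DSC passthrough) or the last link
 *    (DSC decoded at the last DP link), then confirm that a DSC config
 *    exists and the branch throughput caps are respected.
 */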
1780 enum dc_status dm_dp_mst_is_port_support_mode(
1781 struct amdgpu_dm_connector *aconnector,
1782 struct dc_stream_state *stream)
1783 {
1784 #if defined(CONFIG_DRM_AMD_DC_FP)
1785 int branch_max_throughput_mps = 0;
1786 struct dc_link_settings cur_link_settings;
1787 uint32_t end_to_end_bw_in_kbps = 0;
1788 uint32_t root_link_bw_in_kbps = 0;
1789 uint32_t virtual_channel_bw_in_kbps = 0;
1790 struct dc_dsc_bw_range bw_range = {0};
1791 struct dc_dsc_config_options dsc_options = {0};
1792 uint32_t stream_kbps;
1793
1794 /* DSC-unnecessary case:
1795  * check if the timing can be supported within the end-to-end BW
1796  */
1797 stream_kbps =
1798 dc_bandwidth_in_kbps_from_timing(&stream->timing,
1799 dc_link_get_highest_encoding_format(stream->link));
1800 cur_link_settings = stream->link->verified_link_cap;
1801 root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
1802 virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
1803
1804 /* pick the end to end bw bottleneck */
1805 end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
1806
1807 if (stream_kbps <= end_to_end_bw_in_kbps) {
1808 DRM_DEBUG_DRIVER("MST_DSC no dsc required. End-to-end bw sufficient\n");
1809 return DC_OK;
1810 }
1811
1812 /* DSC-necessary case */
1813 if (!aconnector->dsc_aux)
1814 return DC_FAIL_BANDWIDTH_VALIDATE;
1815
1816 if (is_dsc_common_config_possible(stream, &bw_range)) {
1817
1818 /* Capable of DSC passthrough: DSC bitstream along the entire path */
1819 if (aconnector->mst_output_port->passthrough_aux) {
1820 if (bw_range.min_kbps > end_to_end_bw_in_kbps) {
1821 DRM_DEBUG_DRIVER("MST_DSC dsc passthrough and decode at endpoint. "
1822 "Max dsc compression bw can't fit into end-to-end bw\n");
1823 return DC_FAIL_BANDWIDTH_VALIDATE;
1824 }
1825 } else {
1826 /* DSC bitstream decoded at the last DP link */
1827 struct drm_dp_mst_port *immediate_upstream_port = NULL;
1828 uint32_t end_link_bw = 0;
1829
1830 /* Get the last DP link BW capability; the mode shall be supported by a legacy peer */
1831 if (aconnector->mst_output_port->pdt != DP_PEER_DEVICE_DP_LEGACY_CONV &&
1832 aconnector->mst_output_port->pdt != DP_PEER_DEVICE_NONE) {
1833 if (aconnector->vc_full_pbn != aconnector->mst_output_port->full_pbn) {
1834 dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw);
1835 aconnector->vc_full_pbn = aconnector->mst_output_port->full_pbn;
1836 aconnector->mst_local_bw = end_link_bw;
1837 } else {
1838 end_link_bw = aconnector->mst_local_bw;
1839 }
1840
1841 if (end_link_bw > 0 && stream_kbps > end_link_bw) {
1842 DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
1843 "Mode required bw can't fit into last link\n");
1844 return DC_FAIL_BANDWIDTH_VALIDATE;
1845 }
1846 }
1847
1848 /* Get the virtual channel bandwidth between the source and the link before the last link */
1849 if (aconnector->mst_output_port->parent->port_parent)
1850 immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
1851
1852 if (immediate_upstream_port) {
1853 virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
1854 virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
1855 } else {
1856 /* For the LCT 1 topology case - only one MSTB */
1857 virtual_channel_bw_in_kbps = root_link_bw_in_kbps;
1858 }
1859
1860 if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
1861 DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
1862 "Max dsc compression can't fit into MST available bw\n");
1863 return DC_FAIL_BANDWIDTH_VALIDATE;
1864 }
1865 }
1866
1867 /* Confirm that a DSC config can be obtained */
1868 dc_dsc_get_default_config_option(stream->link->dc, &dsc_options);
1869 dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16;
1870 if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0],
1871 &stream->sink->dsc_caps.dsc_dec_caps,
1872 &dsc_options,
1873 end_to_end_bw_in_kbps,
1874 &stream->timing,
1875 dc_link_get_highest_encoding_format(stream->link),
1876 &stream->timing.dsc_cfg)) {
1877 stream->timing.flags.DSC = 1;
1878 DRM_DEBUG_DRIVER("MST_DSC require dsc and dsc config found\n");
1879 } else {
1880 DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find appropriate dsc config\n");
1881 return DC_FAIL_BANDWIDTH_VALIDATE;
1882 }
1883
1884 /* Check the MST DSC output bandwidth against the branch overall throughput caps */
1885 switch (stream->timing.pixel_encoding) {
1886 case PIXEL_ENCODING_RGB:
1887 case PIXEL_ENCODING_YCBCR444:
1888 branch_max_throughput_mps =
1889 aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps;
1890 break;
1891 case PIXEL_ENCODING_YCBCR422:
1892 case PIXEL_ENCODING_YCBCR420:
1893 branch_max_throughput_mps =
1894 aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps;
1895 break;
1896 default:
1897 break;
1898 }
1899
1900 if (branch_max_throughput_mps != 0 &&
1901 ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) {
1902 DRM_DEBUG_DRIVER("MST_DSC require dsc but max throughput mps fails\n");
1903 return DC_FAIL_BANDWIDTH_VALIDATE;
1904 }
1905 } else {
1906 DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find common dsc config\n");
1907 return DC_FAIL_BANDWIDTH_VALIDATE;
1908 }
1909 #endif
1910 return DC_OK;
1911 }
1912