xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c (revision 189f164e573e18d9f8876dbd3ad8fcbe11f93037)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2015 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: AMD
24  *
25  */
26 
27 #include <drm/drm_crtc.h>
28 #include <drm/drm_vblank.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_dm.h"
32 #include "dc.h"
33 #include "amdgpu_securedisplay.h"
34 #include "amdgpu_dm_psr.h"
35 #include "amdgpu_dm_replay.h"
36 
/*
 * CRC source names exposed to userspace through the DRM debugfs CRC
 * interface. Must stay in sync with dm_parse_crc_source() below.
 */
static const char *const pipe_crc_sources[] = {
	"none",
	"crtc",
	"crtc dither",
	"dprx",
	"dprx dither",
	"auto",
};
45 
dm_parse_crc_source(const char * source)46 static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
47 {
48 	if (!source || !strcmp(source, "none"))
49 		return AMDGPU_DM_PIPE_CRC_SOURCE_NONE;
50 	if (!strcmp(source, "auto") || !strcmp(source, "crtc"))
51 		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC;
52 	if (!strcmp(source, "dprx"))
53 		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX;
54 	if (!strcmp(source, "crtc dither"))
55 		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER;
56 	if (!strcmp(source, "dprx dither"))
57 		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER;
58 
59 	return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
60 }
61 
dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src)62 static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src)
63 {
64 	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) ||
65 	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER);
66 }
67 
dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src)68 static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src)
69 {
70 	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) ||
71 	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER);
72 }
73 
dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src)74 static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src)
75 {
76 	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) ||
77 	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) ||
78 	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE);
79 }
80 
/*
 * drm_crtc_funcs.get_crc_sources hook: return the list of CRC source
 * names supported by amdgpu CRTCs. The list is global, so @crtc is unused.
 */
const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
						  size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}
87 
88 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
update_phy_id_mapping(struct amdgpu_device * adev)89 static void update_phy_id_mapping(struct amdgpu_device *adev)
90 {
91 	struct drm_device *ddev = adev_to_drm(adev);
92 	struct amdgpu_display_manager *dm = &adev->dm;
93 	struct drm_connector *connector;
94 	struct amdgpu_dm_connector *aconnector;
95 	struct amdgpu_dm_connector *sort_connector[AMDGPU_DM_MAX_CRTC] = {NULL};
96 	struct drm_connector_list_iter iter;
97 	uint8_t idx = 0, idx_2 = 0, connector_cnt = 0;
98 
99 	dm->secure_display_ctx.phy_mapping_updated = false;
100 
101 	mutex_lock(&ddev->mode_config.mutex);
102 	drm_connector_list_iter_begin(ddev, &iter);
103 	drm_for_each_connector_iter(connector, &iter) {
104 
105 		if (connector->status != connector_status_connected)
106 			continue;
107 
108 		if (idx >= AMDGPU_DM_MAX_CRTC) {
109 			drm_warn(adev_to_drm(adev),
110 				"%s connected connectors exceed max crtc\n",
111 				__func__);
112 			mutex_unlock(&ddev->mode_config.mutex);
113 			return;
114 		}
115 
116 		aconnector = to_amdgpu_dm_connector(connector);
117 
118 		sort_connector[idx] = aconnector;
119 		idx++;
120 		connector_cnt++;
121 	}
122 	drm_connector_list_iter_end(&iter);
123 
124 	/* sort connectors by link_enc_hw_instance first */
125 	for (idx = connector_cnt; idx > 1 ; idx--) {
126 		for (idx_2 = 0; idx_2 < (idx - 1); idx_2++) {
127 			if (sort_connector[idx_2]->dc_link->link_enc_hw_inst >
128 			    sort_connector[idx_2 + 1]->dc_link->link_enc_hw_inst)
129 				swap(sort_connector[idx_2], sort_connector[idx_2 + 1]);
130 		}
131 	}
132 
133 	/*
134 	 * Sort mst connectors by RAD. mst connectors with the same enc_hw_instance are already
135 	 * sorted together above.
136 	 */
137 	for (idx = 0; idx < connector_cnt; /*Do nothing*/) {
138 		if (sort_connector[idx]->mst_root) {
139 			uint8_t i, j, k;
140 			uint8_t mst_con_cnt = 1;
141 
142 			for (idx_2 = (idx + 1); idx_2 < connector_cnt; idx_2++) {
143 				if (sort_connector[idx_2]->mst_root == sort_connector[idx]->mst_root)
144 					mst_con_cnt++;
145 				else
146 					break;
147 			}
148 
149 			for (i = mst_con_cnt; i > 1; i--) {
150 				for (j = idx; j < (idx + i - 2); j++) {
151 					int mstb_lct = sort_connector[j]->mst_output_port->parent->lct;
152 					int next_mstb_lct = sort_connector[j + 1]->mst_output_port->parent->lct;
153 					u8 *rad;
154 					u8 *next_rad;
155 					bool swap = false;
156 
157 					/* Sort by mst tree depth first. Then compare RAD if depth is the same*/
158 					if (mstb_lct > next_mstb_lct) {
159 						swap = true;
160 					} else if (mstb_lct == next_mstb_lct) {
161 						if (mstb_lct == 1) {
162 							if (sort_connector[j]->mst_output_port->port_num > sort_connector[j + 1]->mst_output_port->port_num)
163 								swap = true;
164 						} else if (mstb_lct > 1) {
165 							rad = sort_connector[j]->mst_output_port->parent->rad;
166 							next_rad = sort_connector[j + 1]->mst_output_port->parent->rad;
167 
168 							for (k = 0; k < mstb_lct - 1; k++) {
169 								int shift = (k % 2) ? 0 : 4;
170 								int port_num = (rad[k / 2] >> shift) & 0xf;
171 								int next_port_num = (next_rad[k / 2] >> shift) & 0xf;
172 
173 								if (port_num > next_port_num) {
174 									swap = true;
175 									break;
176 								}
177 							}
178 						} else {
179 							DRM_ERROR("MST LCT shouldn't be set as < 1");
180 							mutex_unlock(&ddev->mode_config.mutex);
181 							return;
182 						}
183 					}
184 
185 					if (swap)
186 						swap(sort_connector[j], sort_connector[j + 1]);
187 				}
188 			}
189 
190 			idx += mst_con_cnt;
191 		} else {
192 			idx++;
193 		}
194 	}
195 
196 	/* Complete sorting. Assign relavant result to dm->secure_display_ctx.phy_id_mapping[]*/
197 	memset(dm->secure_display_ctx.phy_id_mapping, 0, sizeof(dm->secure_display_ctx.phy_id_mapping));
198 	for (idx = 0; idx < connector_cnt; idx++) {
199 		aconnector = sort_connector[idx];
200 
201 		dm->secure_display_ctx.phy_id_mapping[idx].assigned = true;
202 		dm->secure_display_ctx.phy_id_mapping[idx].is_mst = false;
203 		dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst = aconnector->dc_link->link_enc_hw_inst;
204 
205 		if (sort_connector[idx]->mst_root) {
206 			dm->secure_display_ctx.phy_id_mapping[idx].is_mst = true;
207 			dm->secure_display_ctx.phy_id_mapping[idx].lct = aconnector->mst_output_port->parent->lct;
208 			dm->secure_display_ctx.phy_id_mapping[idx].port_num = aconnector->mst_output_port->port_num;
209 			memcpy(dm->secure_display_ctx.phy_id_mapping[idx].rad,
210 				aconnector->mst_output_port->parent->rad, sizeof(aconnector->mst_output_port->parent->rad));
211 		}
212 	}
213 	mutex_unlock(&ddev->mode_config.mutex);
214 
215 	dm->secure_display_ctx.phy_id_mapping_cnt = connector_cnt;
216 	dm->secure_display_ctx.phy_mapping_updated = true;
217 }
218 
get_phy_id(struct amdgpu_display_manager * dm,struct amdgpu_dm_connector * aconnector,uint8_t * phy_id)219 static bool get_phy_id(struct amdgpu_display_manager *dm,
220 			struct amdgpu_dm_connector *aconnector, uint8_t *phy_id)
221 {
222 	int idx, idx_2;
223 	bool found = false;
224 
225 	/*
226 	 * Assume secure display start after all connectors are probed. The connection
227 	 * config is static as well
228 	 */
229 	if (!dm->secure_display_ctx.phy_mapping_updated) {
230 		DRM_WARN("%s Should update the phy id table before get it's value", __func__);
231 		return false;
232 	}
233 
234 	for (idx = 0; idx < dm->secure_display_ctx.phy_id_mapping_cnt; idx++) {
235 		if (!dm->secure_display_ctx.phy_id_mapping[idx].assigned) {
236 			DRM_ERROR("phy_id_mapping[%d] should be assigned", idx);
237 			return false;
238 		}
239 
240 		if (aconnector->dc_link->link_enc_hw_inst ==
241 				dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst) {
242 			if (!dm->secure_display_ctx.phy_id_mapping[idx].is_mst) {
243 				found = true;
244 				goto out;
245 			} else {
246 				/* Could caused by wrongly pass mst root connector */
247 				if (!aconnector->mst_output_port) {
248 					DRM_ERROR("%s Check mst case but connector without a port assigned", __func__);
249 					return false;
250 				}
251 
252 				if (aconnector->mst_root &&
253 					aconnector->mst_root->mst_mgr.mst_primary == NULL) {
254 					DRM_WARN("%s pass in a stale mst connector", __func__);
255 				}
256 
257 				if (aconnector->mst_output_port->parent->lct == dm->secure_display_ctx.phy_id_mapping[idx].lct &&
258 					aconnector->mst_output_port->port_num == dm->secure_display_ctx.phy_id_mapping[idx].port_num) {
259 					if (aconnector->mst_output_port->parent->lct == 1) {
260 						found = true;
261 						goto out;
262 					} else if (aconnector->mst_output_port->parent->lct > 1) {
263 						/* Check RAD */
264 						for (idx_2 = 0; idx_2 < aconnector->mst_output_port->parent->lct - 1; idx_2++) {
265 							int shift = (idx_2 % 2) ? 0 : 4;
266 							int port_num = (aconnector->mst_output_port->parent->rad[idx_2 / 2] >> shift) & 0xf;
267 							int port_num2 = (dm->secure_display_ctx.phy_id_mapping[idx].rad[idx_2 / 2] >> shift) & 0xf;
268 
269 							if (port_num != port_num2)
270 								break;
271 						}
272 
273 						if (idx_2 == aconnector->mst_output_port->parent->lct - 1) {
274 							found = true;
275 							goto out;
276 						}
277 					} else {
278 						DRM_ERROR("lCT should be >= 1");
279 						return false;
280 					}
281 				}
282 			}
283 		}
284 	}
285 
286 out:
287 	if (found) {
288 		DRM_DEBUG_DRIVER("Associated secure display PHY ID as %d", idx);
289 		*phy_id = idx;
290 	} else {
291 		DRM_WARN("Can't find associated phy ID");
292 		return false;
293 	}
294 
295 	return true;
296 }
297 
amdgpu_dm_set_crc_window_default(struct drm_crtc * crtc,struct dc_stream_state * stream)298 static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream)
299 {
300 	struct drm_device *drm_dev = crtc->dev;
301 	struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
302 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
303 	struct amdgpu_dm_connector *aconnector;
304 	bool was_activated;
305 	uint8_t phy_id;
306 	unsigned long flags;
307 	int i;
308 
309 	spin_lock_irqsave(&drm_dev->event_lock, flags);
310 	was_activated = acrtc->dm_irq_params.crc_window_activated;
311 	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
312 		acrtc->dm_irq_params.window_param[i].x_start = 0;
313 		acrtc->dm_irq_params.window_param[i].y_start = 0;
314 		acrtc->dm_irq_params.window_param[i].x_end = 0;
315 		acrtc->dm_irq_params.window_param[i].y_end = 0;
316 		acrtc->dm_irq_params.window_param[i].enable = false;
317 		acrtc->dm_irq_params.window_param[i].update_win = false;
318 		acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 0;
319 	}
320 	acrtc->dm_irq_params.crc_window_activated = false;
321 	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
322 
323 	/* Disable secure_display if it was enabled */
324 	if (was_activated && dm->secure_display_ctx.op_mode == LEGACY_MODE) {
325 		/* stop ROI update on this crtc */
326 		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work);
327 		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work);
328 		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
329 
330 		if (aconnector && get_phy_id(dm, aconnector, &phy_id)) {
331 			if (dm->secure_display_ctx.support_mul_roi)
332 				dc_stream_forward_multiple_crc_window(stream, NULL, phy_id, true);
333 			else
334 				dc_stream_forward_crc_window(stream, NULL, phy_id, true);
335 		} else {
336 			DRM_DEBUG_DRIVER("%s Can't find matching phy id", __func__);
337 		}
338 	}
339 }
340 
/*
 * Deferred work: ask the PSP secure display TA to read out the latched
 * window CRCs for this CRTC's phy and transmit them over I2C.
 *
 * Runs from the system workqueue (scheduled by the vblank CRC window
 * handler), so it may sleep on the mutexes below.
 */
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
	struct secure_display_crtc_context *crtc_ctx;
	struct psp_context *psp;
	struct ta_securedisplay_cmd *securedisplay_cmd;
	struct drm_crtc *crtc;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector;
	uint8_t phy_inst;
	struct amdgpu_display_manager *dm;
	struct crc_data crc_cpy[MAX_CRC_WINDOW_NUM];
	unsigned long flags;
	uint8_t roi_idx = 0;
	int ret;
	int i;

	crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work);
	crtc = crtc_ctx->crtc;

	if (!crtc)
		return;

	psp = &drm_to_adev(crtc->dev)->psp;

	/* Without an initialized secure display TA there is nobody to notify. */
	if (!psp->securedisplay_context.context.initialized) {
		DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n");
		return;
	}

	dm = &drm_to_adev(crtc->dev)->dm;
	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	if (!aconnector)
		return;

	/* Phy id lookup walks the connector mapping; serialize against probes. */
	mutex_lock(&crtc->dev->mode_config.mutex);
	if (!get_phy_id(dm, aconnector, &phy_inst)) {
		DRM_WARN("%s Can't find mapping phy id!", __func__);
		mutex_unlock(&crtc->dev->mode_config.mutex);
		return;
	}
	mutex_unlock(&crtc->dev->mode_config.mutex);

	/* Snapshot the per-window CRC data under the lock the IRQ handler uses. */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	memcpy(crc_cpy, crtc_ctx->crc_info.crc, sizeof(struct crc_data) * MAX_CRC_WINDOW_NUM);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	/* need lock for multiple crtcs to use the command buffer */
	mutex_lock(&psp->securedisplay_context.mutex);
	/* PSP TA is expected to finish data transmission over I2C within current frame,
	 * even there are up to 4 crtcs request to send in this frame.
	 */
	if (dm->secure_display_ctx.support_mul_roi) {
		/* V2 command carries a bitmask of which ROI windows are ready. */
		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
							TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);

		securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.phy_id = phy_inst;

		for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
			if (crc_cpy[i].crc_ready)
				roi_idx |= 1 << i;
		}
		securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.roi_idx = roi_idx;

		ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
	} else {
		/* Legacy single-ROI command: only the phy id is needed. */
		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
							TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);

		securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;

		ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
	}

	/* Invoke succeeded but the TA may still report a command-level failure. */
	if (!ret) {
		if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
			psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
	}

	mutex_unlock(&psp->securedisplay_context.mutex);
}
422 
423 static void
amdgpu_dm_forward_crc_window(struct work_struct * work)424 amdgpu_dm_forward_crc_window(struct work_struct *work)
425 {
426 	struct secure_display_crtc_context *crtc_ctx;
427 	struct amdgpu_display_manager *dm;
428 	struct drm_crtc *crtc;
429 	struct dc_stream_state *stream;
430 	struct amdgpu_dm_connector *aconnector;
431 	struct crc_window roi_cpy[MAX_CRC_WINDOW_NUM];
432 	unsigned long flags;
433 	uint8_t phy_id;
434 
435 	crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work);
436 	crtc = crtc_ctx->crtc;
437 
438 	if (!crtc)
439 		return;
440 
441 	dm = &drm_to_adev(crtc->dev)->dm;
442 	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
443 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
444 
445 	if (!aconnector)
446 		return;
447 
448 	mutex_lock(&crtc->dev->mode_config.mutex);
449 	if (!get_phy_id(dm, aconnector, &phy_id)) {
450 		DRM_WARN("%s Can't find mapping phy id!", __func__);
451 		mutex_unlock(&crtc->dev->mode_config.mutex);
452 		return;
453 	}
454 	mutex_unlock(&crtc->dev->mode_config.mutex);
455 
456 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
457 	memcpy(roi_cpy, crtc_ctx->roi, sizeof(struct crc_window) * MAX_CRC_WINDOW_NUM);
458 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
459 
460 	mutex_lock(&dm->dc_lock);
461 	if (dm->secure_display_ctx.support_mul_roi)
462 		dc_stream_forward_multiple_crc_window(stream, roi_cpy,
463 			phy_id, false);
464 	else
465 		dc_stream_forward_crc_window(stream, &roi_cpy[0].rect,
466 			phy_id, false);
467 	mutex_unlock(&dm->dc_lock);
468 }
469 
amdgpu_dm_crc_window_is_activated(struct drm_crtc * crtc)470 bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
471 {
472 	struct drm_device *drm_dev = crtc->dev;
473 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
474 	bool ret = false;
475 
476 	spin_lock_irq(&drm_dev->event_lock);
477 	ret = acrtc->dm_irq_params.crc_window_activated;
478 	spin_unlock_irq(&drm_dev->event_lock);
479 
480 	return ret;
481 }
482 #endif
483 
484 int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc * crtc,const char * src_name,size_t * values_cnt)485 amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
486 				 size_t *values_cnt)
487 {
488 	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
489 
490 	if (source < 0) {
491 		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
492 				 src_name, crtc->index);
493 		return -EINVAL;
494 	}
495 
496 	*values_cnt = 3;
497 	return 0;
498 }
499 
/*
 * Program the hardware for CRC capture from @source (or tear it down when
 * @source is not a valid capture source). Also manages the PSR/replay and
 * dithering side effects that CRC capture requires.
 *
 * Returns 0 on success, -EINVAL if there is no stream yet or the DC call
 * rejects the configuration.
 */
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
					struct dm_crtc_state *dm_crtc_state,
					enum amdgpu_dm_pipe_crc_source source)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc_stream_state *stream_state = dm_crtc_state->stream;
	struct amdgpu_dm_connector *aconnector = NULL;
	bool enable = amdgpu_dm_is_valid_crc_source(source);
	int ret = 0;
	enum crc_poly_mode crc_poly_mode = CRC_POLY_MODE_16;

	/* Configuration will be deferred to stream enable. */
	if (!stream_state)
		return -EINVAL;

	/* Get connector from stream */
	aconnector = (struct amdgpu_dm_connector *)stream_state->dm_stream_context;

	mutex_lock(&adev->dm.dc_lock);


	if (enable) {
		/* For PSR1, check that the panel has exited PSR */
		if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
			amdgpu_dm_psr_wait_disable(stream_state);

		/* Set flag to disallow enter replay when CRC source is enabled */
		if (aconnector)
			aconnector->disallow_edp_enter_replay = true;
		amdgpu_dm_replay_disable(stream_state);
	}

	/* CRC polynomial selection only support for DCN3.6+ except DCN4.0.1 */
	if ((amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 6, 0)) &&
		(amdgpu_ip_version(adev, DCE_HWIP, 0) != IP_VERSION(4, 0, 1))) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		crc_poly_mode = acrtc->dm_irq_params.crc_poly_mode;
	}

	/* Enable or disable CRTC CRC generation */
	if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
		if (!dc_stream_configure_crc(stream_state->ctx->dc,
					     stream_state, NULL, enable, enable, 0, true, crc_poly_mode)) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Configure dithering */
	if (!dm_need_crc_dither(source)) {
		/* Truncate to 8bpc and disable dynamic expansion for stable CRCs. */
		dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_DISABLE);
	} else {
		/* Restore default dithering/expansion behavior. */
		dc_stream_set_dither_option(stream_state,
					    DITHER_OPTION_DEFAULT);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_AUTO);
	}

	if (!enable) {
		/* Clear flag to allow enter replay when CRC source is disabled */
		if (aconnector)
			aconnector->disallow_edp_enter_replay = false;
	}

unlock:
	mutex_unlock(&adev->dm.dc_lock);

	return ret;
}
572 
amdgpu_dm_crtc_set_crc_source(struct drm_crtc * crtc,const char * src_name)573 int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
574 {
575 	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
576 	enum amdgpu_dm_pipe_crc_source cur_crc_src;
577 	struct drm_crtc_commit *commit;
578 	struct dm_crtc_state *crtc_state;
579 	struct drm_device *drm_dev = crtc->dev;
580 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
581 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
582 	struct amdgpu_display_manager *dm = &adev->dm;
583 #endif
584 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
585 	struct drm_dp_aux *aux = NULL;
586 	bool enable = false;
587 	bool enabled = false;
588 	int ret = 0;
589 
590 	if (source < 0) {
591 		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
592 				 src_name, crtc->index);
593 		return -EINVAL;
594 	}
595 
596 	ret = drm_modeset_lock(&crtc->mutex, NULL);
597 	if (ret)
598 		return ret;
599 
600 	spin_lock(&crtc->commit_lock);
601 	commit = list_first_entry_or_null(&crtc->commit_list,
602 					  struct drm_crtc_commit, commit_entry);
603 	if (commit)
604 		drm_crtc_commit_get(commit);
605 	spin_unlock(&crtc->commit_lock);
606 
607 	if (commit) {
608 		/*
609 		 * Need to wait for all outstanding programming to complete
610 		 * in commit tail since it can modify CRC related fields and
611 		 * hardware state. Since we're holding the CRTC lock we're
612 		 * guaranteed that no other commit work can be queued off
613 		 * before we modify the state below.
614 		 */
615 		ret = wait_for_completion_interruptible_timeout(
616 			&commit->hw_done, 10 * HZ);
617 		if (ret)
618 			goto cleanup;
619 	}
620 
621 	enable = amdgpu_dm_is_valid_crc_source(source);
622 	crtc_state = to_dm_crtc_state(crtc->state);
623 	spin_lock_irq(&drm_dev->event_lock);
624 	cur_crc_src = acrtc->dm_irq_params.crc_src;
625 	spin_unlock_irq(&drm_dev->event_lock);
626 
627 	/*
628 	 * USER REQ SRC | CURRENT SRC | BEHAVIOR
629 	 * -----------------------------
630 	 * None         | None        | Do nothing
631 	 * None         | CRTC        | Disable CRTC CRC, set default to dither
632 	 * None         | DPRX        | Disable DPRX CRC, need 'aux', set default to dither
633 	 * None         | CRTC DITHER | Disable CRTC CRC
634 	 * None         | DPRX DITHER | Disable DPRX CRC, need 'aux'
635 	 * CRTC         | XXXX        | Enable CRTC CRC, no dither
636 	 * DPRX         | XXXX        | Enable DPRX CRC, need 'aux', no dither
637 	 * CRTC DITHER  | XXXX        | Enable CRTC CRC, set dither
638 	 * DPRX DITHER  | XXXX        | Enable DPRX CRC, need 'aux', set dither
639 	 */
640 	if (dm_is_crc_source_dprx(source) ||
641 	    (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE &&
642 	     dm_is_crc_source_dprx(cur_crc_src))) {
643 		struct amdgpu_dm_connector *aconn = NULL;
644 		struct drm_connector *connector;
645 		struct drm_connector_list_iter conn_iter;
646 
647 		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
648 		drm_for_each_connector_iter(connector, &conn_iter) {
649 			if (!connector->state || connector->state->crtc != crtc)
650 				continue;
651 
652 			if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
653 				continue;
654 
655 			aconn = to_amdgpu_dm_connector(connector);
656 			break;
657 		}
658 		drm_connector_list_iter_end(&conn_iter);
659 
660 		if (!aconn) {
661 			DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index);
662 			ret = -EINVAL;
663 			goto cleanup;
664 		}
665 
666 		aux = (aconn->mst_output_port) ? &aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux;
667 
668 		if (!aux) {
669 			DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
670 			ret = -EINVAL;
671 			goto cleanup;
672 		}
673 
674 		if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
675 				(aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
676 			DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
677 			ret = -EINVAL;
678 			goto cleanup;
679 		}
680 
681 	}
682 
683 	/*
684 	 * Reading the CRC requires the vblank interrupt handler to be
685 	 * enabled. Keep a reference until CRC capture stops.
686 	 */
687 	enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
688 	if (!enabled && enable) {
689 		ret = drm_crtc_vblank_get(crtc);
690 		if (ret)
691 			goto cleanup;
692 	}
693 
694 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
695 	/* Reset secure_display when we change crc source from debugfs */
696 	amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
697 #endif
698 
699 	if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
700 		ret = -EINVAL;
701 		goto cleanup;
702 	}
703 
704 	if (!enabled && enable) {
705 		if (dm_is_crc_source_dprx(source)) {
706 			if (drm_dp_start_crc(aux, crtc)) {
707 				DRM_DEBUG_DRIVER("dp start crc failed\n");
708 				ret = -EINVAL;
709 				goto cleanup;
710 			}
711 		}
712 	} else if (enabled && !enable) {
713 		drm_crtc_vblank_put(crtc);
714 		if (dm_is_crc_source_dprx(source)) {
715 			if (drm_dp_stop_crc(aux)) {
716 				DRM_DEBUG_DRIVER("dp stop crc failed\n");
717 				ret = -EINVAL;
718 				goto cleanup;
719 			}
720 		}
721 	}
722 
723 	spin_lock_irq(&drm_dev->event_lock);
724 	acrtc->dm_irq_params.crc_src = source;
725 	spin_unlock_irq(&drm_dev->event_lock);
726 
727 	/* Reset crc_skipped on dm state */
728 	crtc_state->crc_skip_count = 0;
729 
730 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
731 	/* Initialize phy id mapping table for secure display*/
732 	if (dm->secure_display_ctx.op_mode == LEGACY_MODE &&
733 		!dm->secure_display_ctx.phy_mapping_updated)
734 		update_phy_id_mapping(adev);
735 #endif
736 
737 cleanup:
738 	if (commit)
739 		drm_crtc_commit_put(commit);
740 
741 	drm_modeset_unlock(&crtc->mutex);
742 
743 	return ret;
744 }
745 
746 /**
747  * amdgpu_dm_crtc_handle_crc_irq: Report to DRM the CRC on given CRTC.
748  * @crtc: DRM CRTC object.
749  *
750  * This function should be called at the end of a vblank, when the fb has been
751  * fully processed through the pipe.
752  */
amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc * crtc)753 void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
754 {
755 	struct dm_crtc_state *crtc_state;
756 	struct dc_stream_state *stream_state;
757 	struct drm_device *drm_dev = NULL;
758 	enum amdgpu_dm_pipe_crc_source cur_crc_src;
759 	struct amdgpu_crtc *acrtc = NULL;
760 	uint32_t crcs[3];
761 	unsigned long flags;
762 
763 	if (crtc == NULL)
764 		return;
765 
766 	crtc_state = to_dm_crtc_state(crtc->state);
767 	stream_state = crtc_state->stream;
768 	acrtc = to_amdgpu_crtc(crtc);
769 	drm_dev = crtc->dev;
770 
771 	spin_lock_irqsave(&drm_dev->event_lock, flags);
772 	cur_crc_src = acrtc->dm_irq_params.crc_src;
773 	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
774 
775 	/* Early return if CRC capture is not enabled. */
776 	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
777 		return;
778 
779 	/*
780 	 * Since flipping and crc enablement happen asynchronously, we - more
781 	 * often than not - will be returning an 'uncooked' crc on first frame.
782 	 * Probably because hw isn't ready yet. For added security, skip the
783 	 * first two CRC values.
784 	 */
785 	if (crtc_state->crc_skip_count < 2) {
786 		crtc_state->crc_skip_count += 1;
787 		return;
788 	}
789 
790 	if (dm_is_crc_source_crtc(cur_crc_src)) {
791 		if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0,
792 				       &crcs[0], &crcs[1], &crcs[2]))
793 			return;
794 
795 		drm_crtc_add_crc_entry(crtc, true,
796 				       drm_crtc_accurate_vblank_count(crtc), crcs);
797 	}
798 }
799 
800 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc * crtc)801 void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
802 {
803 	struct drm_device *drm_dev = NULL;
804 	enum amdgpu_dm_pipe_crc_source cur_crc_src;
805 	struct amdgpu_crtc *acrtc = NULL;
806 	struct amdgpu_device *adev = NULL;
807 	struct secure_display_crtc_context *crtc_ctx = NULL;
808 	bool reset_crc_frame_count[MAX_CRC_WINDOW_NUM] = {false};
809 	uint32_t crc_r[MAX_CRC_WINDOW_NUM] = {0};
810 	uint32_t crc_g[MAX_CRC_WINDOW_NUM] = {0};
811 	uint32_t crc_b[MAX_CRC_WINDOW_NUM] = {0};
812 	unsigned long flags1;
813 	bool forward_roi_change = false;
814 	bool notify_ta = false;
815 	bool all_crc_ready = true;
816 	struct dc_stream_state *stream_state;
817 	int i;
818 
819 	if (crtc == NULL)
820 		return;
821 
822 	acrtc = to_amdgpu_crtc(crtc);
823 	adev = drm_to_adev(crtc->dev);
824 	drm_dev = crtc->dev;
825 	stream_state = to_dm_crtc_state(crtc->state)->stream;
826 
827 	spin_lock_irqsave(&drm_dev->event_lock, flags1);
828 	cur_crc_src = acrtc->dm_irq_params.crc_src;
829 
830 	/* Early return if CRC capture is not enabled. */
831 	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
832 	    !dm_is_crc_source_crtc(cur_crc_src)) {
833 		spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
834 		return;
835 	}
836 
837 	if (!acrtc->dm_irq_params.crc_window_activated) {
838 		spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
839 		return;
840 	}
841 
842 	crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id];
843 	if (WARN_ON(crtc_ctx->crtc != crtc)) {
844 		/* We have set the crtc when creating secure_display_crtc_context,
845 		 * don't expect it to be changed here.
846 		 */
847 		crtc_ctx->crtc = crtc;
848 	}
849 
850 	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
851 		struct crc_params crc_window = {
852 			.windowa_x_start = acrtc->dm_irq_params.window_param[i].x_start,
853 			.windowa_y_start = acrtc->dm_irq_params.window_param[i].y_start,
854 			.windowa_x_end = acrtc->dm_irq_params.window_param[i].x_end,
855 			.windowa_y_end = acrtc->dm_irq_params.window_param[i].y_end,
856 			.windowb_x_start = acrtc->dm_irq_params.window_param[i].x_start,
857 			.windowb_y_start = acrtc->dm_irq_params.window_param[i].y_start,
858 			.windowb_x_end = acrtc->dm_irq_params.window_param[i].x_end,
859 			.windowb_y_end = acrtc->dm_irq_params.window_param[i].y_end,
860 		};
861 
862 		crtc_ctx->roi[i].enable = acrtc->dm_irq_params.window_param[i].enable;
863 
864 		if (!acrtc->dm_irq_params.window_param[i].enable) {
865 			crtc_ctx->crc_info.crc[i].crc_ready = false;
866 			continue;
867 		}
868 
869 		if (acrtc->dm_irq_params.window_param[i].skip_frame_cnt) {
870 			acrtc->dm_irq_params.window_param[i].skip_frame_cnt -= 1;
871 			crtc_ctx->crc_info.crc[i].crc_ready = false;
872 			continue;
873 		}
874 
875 		if (acrtc->dm_irq_params.window_param[i].update_win) {
876 			crtc_ctx->roi[i].rect.x = crc_window.windowa_x_start;
877 			crtc_ctx->roi[i].rect.y = crc_window.windowa_y_start;
878 			crtc_ctx->roi[i].rect.width = crc_window.windowa_x_end -
879 						crc_window.windowa_x_start;
880 			crtc_ctx->roi[i].rect.height = crc_window.windowa_y_end -
881 						crc_window.windowa_y_start;
882 
883 			if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
884 				/* forward task to dmub to update ROI */
885 				forward_roi_change = true;
886 			else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
887 				/* update ROI via dm*/
888 				dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
889 					&crc_window, true, true, i, false, (enum crc_poly_mode)acrtc->dm_irq_params.crc_poly_mode);
890 
891 			reset_crc_frame_count[i] = true;
892 
893 			acrtc->dm_irq_params.window_param[i].update_win = false;
894 
895 			/* Statically skip 1 frame, because we may need to wait below things
896 			 * before sending ROI to dmub:
897 			 * 1. We defer the work by using system workqueue.
898 			 * 2. We may need to wait for dc_lock before accessing dmub.
899 			 */
900 			acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 1;
901 			crtc_ctx->crc_info.crc[i].crc_ready = false;
902 		} else {
903 			if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, i,
904 						&crc_r[i], &crc_g[i], &crc_b[i]))
905 				DRM_ERROR("Secure Display: fail to get crc from engine %d\n", i);
906 
907 			if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
908 				/* forward task to psp to read ROI/CRC and output via I2C */
909 				notify_ta = true;
910 			else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
911 				/* Avoid ROI window get changed, keep overwriting. */
912 				dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
913 						&crc_window, true, true, i, false, (enum crc_poly_mode)acrtc->dm_irq_params.crc_poly_mode);
914 
915 			/* crc ready for psp to read out */
916 			crtc_ctx->crc_info.crc[i].crc_ready = true;
917 		}
918 	}
919 
920 	spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
921 
922 	if (forward_roi_change)
923 		schedule_work(&crtc_ctx->forward_roi_work);
924 
925 	if (notify_ta)
926 		schedule_work(&crtc_ctx->notify_ta_work);
927 
928 	spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags1);
929 	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
930 		crtc_ctx->crc_info.crc[i].crc_R = crc_r[i];
931 		crtc_ctx->crc_info.crc[i].crc_G = crc_g[i];
932 		crtc_ctx->crc_info.crc[i].crc_B = crc_b[i];
933 
934 		if (!crtc_ctx->roi[i].enable) {
935 			crtc_ctx->crc_info.crc[i].frame_count = 0;
936 			continue;
937 		}
938 
939 		if (!crtc_ctx->crc_info.crc[i].crc_ready)
940 			all_crc_ready = false;
941 
942 		if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX)
943 			/* Reset the reference frame count after user update the ROI
944 			 * or it reaches the maximum value.
945 			 */
946 			crtc_ctx->crc_info.crc[i].frame_count = 0;
947 		else
948 			crtc_ctx->crc_info.crc[i].frame_count += 1;
949 	}
950 	spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);
951 
952 	if (all_crc_ready)
953 		complete_all(&crtc_ctx->crc_info.completion);
954 }
955 
amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device * adev)956 void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
957 {
958 	struct secure_display_crtc_context *crtc_ctx = NULL;
959 	int i;
960 
961 	crtc_ctx = kzalloc_objs(struct secure_display_crtc_context,
962 				adev->mode_info.num_crtc);
963 
964 	if (!crtc_ctx) {
965 		adev->dm.secure_display_ctx.crtc_ctx = NULL;
966 		return;
967 	}
968 
969 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
970 		INIT_WORK(&crtc_ctx[i].forward_roi_work, amdgpu_dm_forward_crc_window);
971 		INIT_WORK(&crtc_ctx[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
972 		crtc_ctx[i].crtc = &adev->mode_info.crtcs[i]->base;
973 		spin_lock_init(&crtc_ctx[i].crc_info.lock);
974 	}
975 
976 	adev->dm.secure_display_ctx.crtc_ctx = crtc_ctx;
977 
978 	adev->dm.secure_display_ctx.op_mode = DISPLAY_CRC_MODE;
979 }
980 #endif
981