/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_dm_psr.h"

static const char *const pipe_crc_sources[] = {
	"none",
	"crtc",
	"crtc dither",
	"dprx",
	"dprx dither",
	"auto",
};

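/*
 * Map a debugfs CRC source string (e.g. "crtc", "dprx dither") to the
 * internal enum. NULL or "none" mean no source, "auto" is treated as the
 * CRTC source, and an unrecognized string yields
 * AMDGPU_DM_PIPE_CRC_SOURCE_INVALID.
 */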
static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
{
	if (!source || !strcmp(source, "none"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_NONE;
	if (!strcmp(source, "auto") || !strcmp(source, "crtc"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC;
	if (!strcmp(source, "dprx"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX;
	if (!strcmp(source, "crtc dither"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER;
	if (!strcmp(source, "dprx dither"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER;

	return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
}

static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER);
}

static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER);
}

static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE);
}

const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
						  size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
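/*
 * Build a stable connector -> PHY ID table for the secure display TA.
 * Connected connectors are collected, ordered by link encoder instance
 * and, for MST, by topology depth (LCT) and RAD, so repeated probes of
 * the same physical display path always yield the same PHY ID.
 */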
static void update_phy_id_mapping(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *sort_connector[AMDGPU_DM_MAX_CRTC] = {NULL};
	struct drm_connector_list_iter iter;
	uint8_t idx = 0, idx_2 = 0, connector_cnt = 0;

	dm->secure_display_ctx.phy_mapping_updated = false;

	mutex_lock(&ddev->mode_config.mutex);
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->status != connector_status_connected)
			continue;

		if (idx >= AMDGPU_DM_MAX_CRTC) {
			DRM_WARN("%s connected connectors exceed max crtc\n", __func__);
			mutex_unlock(&ddev->mode_config.mutex);
			return;
		}

		aconnector = to_amdgpu_dm_connector(connector);

		sort_connector[idx] = aconnector;
		idx++;
		connector_cnt++;
	}
	drm_connector_list_iter_end(&iter);

	/* sort connectors by link_enc_hw_instance first */
	for (idx = connector_cnt; idx > 1; idx--) {
		for (idx_2 = 0; idx_2 < (idx - 1); idx_2++) {
			if (sort_connector[idx_2]->dc_link->link_enc_hw_inst >
			    sort_connector[idx_2 + 1]->dc_link->link_enc_hw_inst)
				swap(sort_connector[idx_2], sort_connector[idx_2 + 1]);
		}
	}

	/*
	 * Sort mst connectors by RAD. mst connectors with the same enc_hw_instance are already
	 * sorted together above.
	 */
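	/*
	 * RAD packs one 4-bit port number per hop, high nibble first, as
	 * unpacked by the comparison loop below. Illustrative example: an
	 * mstb with lct = 3 and rad[0] = 0x21 is reached through port 2
	 * and then port 1.
	 */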
	for (idx = 0; idx < connector_cnt; /*Do nothing*/) {
		if (sort_connector[idx]->mst_root) {
			uint8_t i, j, k;
			uint8_t mst_con_cnt = 1;

			for (idx_2 = (idx + 1); idx_2 < connector_cnt; idx_2++) {
				if (sort_connector[idx_2]->mst_root == sort_connector[idx]->mst_root)
					mst_con_cnt++;
				else
					break;
			}

			for (i = mst_con_cnt; i > 1; i--) {
				for (j = idx; j < (idx + i - 1); j++) {
					int mstb_lct = sort_connector[j]->mst_output_port->parent->lct;
					int next_mstb_lct = sort_connector[j + 1]->mst_output_port->parent->lct;
					u8 *rad;
					u8 *next_rad;
					bool swap = false;

					/* Sort by mst tree depth first. Then compare RAD if depth is the same */
					if (mstb_lct > next_mstb_lct) {
						swap = true;
					} else if (mstb_lct == next_mstb_lct) {
						if (mstb_lct == 1) {
							if (sort_connector[j]->mst_output_port->port_num > sort_connector[j + 1]->mst_output_port->port_num)
								swap = true;
						} else if (mstb_lct > 1) {
							rad = sort_connector[j]->mst_output_port->parent->rad;
							next_rad = sort_connector[j + 1]->mst_output_port->parent->rad;

							for (k = 0; k < mstb_lct - 1; k++) {
								int shift = (k % 2) ? 0 : 4;
								int port_num = (rad[k / 2] >> shift) & 0xf;
								int next_port_num = (next_rad[k / 2] >> shift) & 0xf;

								if (port_num > next_port_num) {
									swap = true;
									break;
								}
							}
						} else {
							DRM_ERROR("MST LCT should not be < 1");
							mutex_unlock(&ddev->mode_config.mutex);
							return;
						}
					}

					if (swap)
						swap(sort_connector[j], sort_connector[j + 1]);
				}
			}

			idx += mst_con_cnt;
		} else {
			idx++;
		}
	}

	/* Complete sorting. Assign relevant result to dm->secure_display_ctx.phy_id_mapping[] */
	memset(dm->secure_display_ctx.phy_id_mapping, 0, sizeof(dm->secure_display_ctx.phy_id_mapping));
	for (idx = 0; idx < connector_cnt; idx++) {
		aconnector = sort_connector[idx];

		dm->secure_display_ctx.phy_id_mapping[idx].assigned = true;
		dm->secure_display_ctx.phy_id_mapping[idx].is_mst = false;
		dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst = aconnector->dc_link->link_enc_hw_inst;

		if (sort_connector[idx]->mst_root) {
			dm->secure_display_ctx.phy_id_mapping[idx].is_mst = true;
			dm->secure_display_ctx.phy_id_mapping[idx].lct = aconnector->mst_output_port->parent->lct;
			dm->secure_display_ctx.phy_id_mapping[idx].port_num = aconnector->mst_output_port->port_num;
			memcpy(dm->secure_display_ctx.phy_id_mapping[idx].rad,
				aconnector->mst_output_port->parent->rad, sizeof(aconnector->mst_output_port->parent->rad));
		}
	}
	mutex_unlock(&ddev->mode_config.mutex);

	dm->secure_display_ctx.phy_id_mapping_cnt = connector_cnt;
	dm->secure_display_ctx.phy_mapping_updated = true;
}
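/*
 * Look up the PHY ID previously assigned to @aconnector by
 * update_phy_id_mapping(). For SST links the encoder instance is enough;
 * for MST the LCT, RAD and port number must also match. Returns true and
 * writes the ID through @phy_id on success.
 */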
static bool get_phy_id(struct amdgpu_display_manager *dm,
			struct amdgpu_dm_connector *aconnector, uint8_t *phy_id)
{
	int idx, idx_2;
	bool found = false;

	/*
	 * Assume secure display starts after all connectors are probed. The
	 * connection config is static as well.
	 */
	if (!dm->secure_display_ctx.phy_mapping_updated) {
		DRM_WARN("%s Should update the phy id table before getting its value", __func__);
		return false;
	}

	for (idx = 0; idx < dm->secure_display_ctx.phy_id_mapping_cnt; idx++) {
		if (!dm->secure_display_ctx.phy_id_mapping[idx].assigned) {
			DRM_ERROR("phy_id_mapping[%d] should be assigned", idx);
			return false;
		}

		if (aconnector->dc_link->link_enc_hw_inst ==
				dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst) {
			if (!dm->secure_display_ctx.phy_id_mapping[idx].is_mst) {
				found = true;
				goto out;
			} else {
				/* Could be caused by wrongly passing the mst root connector */
				if (!aconnector->mst_output_port) {
					DRM_ERROR("%s Check mst case but connector without a port assigned", __func__);
					return false;
				}

				if (aconnector->mst_root &&
					aconnector->mst_root->mst_mgr.mst_primary == NULL) {
					DRM_WARN("%s passed in a stale mst connector", __func__);
				}

				if (aconnector->mst_output_port->parent->lct == dm->secure_display_ctx.phy_id_mapping[idx].lct &&
					aconnector->mst_output_port->port_num == dm->secure_display_ctx.phy_id_mapping[idx].port_num) {
					if (aconnector->mst_output_port->parent->lct == 1) {
						found = true;
						goto out;
					} else if (aconnector->mst_output_port->parent->lct > 1) {
						/* Check RAD */
						for (idx_2 = 0; idx_2 < aconnector->mst_output_port->parent->lct - 1; idx_2++) {
							int shift = (idx_2 % 2) ? 0 : 4;
							int port_num = (aconnector->mst_output_port->parent->rad[idx_2 / 2] >> shift) & 0xf;
							int port_num2 = (dm->secure_display_ctx.phy_id_mapping[idx].rad[idx_2 / 2] >> shift) & 0xf;

							if (port_num != port_num2)
								break;
						}

						if (idx_2 == aconnector->mst_output_port->parent->lct - 1) {
							found = true;
							goto out;
						}
					} else {
						DRM_ERROR("LCT should be >= 1");
						return false;
					}
				}
			}
		}
	}

out:
	if (found) {
		DRM_DEBUG_DRIVER("Associated secure display PHY ID as %d", idx);
		*phy_id = idx;
	} else {
		DRM_WARN("Can't find associated phy ID");
		return false;
	}

	return true;
}
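/*
 * Reset all ROI windows on @crtc to their default (disabled) state and,
 * if secure display was active in legacy mode, tell the firmware to stop
 * CRC window forwarding for the stream.
 */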
static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream)
{
	struct drm_device *drm_dev = crtc->dev;
	struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_dm_connector *aconnector;
	bool was_activated;
	uint8_t phy_id;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	was_activated = acrtc->dm_irq_params.crc_window_activated;
	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		acrtc->dm_irq_params.window_param[i].x_start = 0;
		acrtc->dm_irq_params.window_param[i].y_start = 0;
		acrtc->dm_irq_params.window_param[i].x_end = 0;
		acrtc->dm_irq_params.window_param[i].y_end = 0;
		acrtc->dm_irq_params.window_param[i].enable = false;
		acrtc->dm_irq_params.window_param[i].update_win = false;
		acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 0;
	}
	acrtc->dm_irq_params.crc_window_activated = false;
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	/* Disable secure_display if it was enabled */
	if (was_activated && dm->secure_display_ctx.op_mode == LEGACY_MODE) {
		/* stop ROI update on this crtc */
		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work);
		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work);
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (aconnector && get_phy_id(dm, aconnector, &phy_id)) {
			if (dm->secure_display_ctx.support_mul_roi)
				dc_stream_forward_multiple_crc_window(stream, NULL, phy_id, true);
			else
				dc_stream_forward_crc_window(stream, NULL, phy_id, true);
		} else {
			DRM_DEBUG_DRIVER("%s Can't find matching phy id", __func__);
		}
	}
}
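/*
 * Deferred work: hand the latest per-window CRCs to the PSP secure
 * display TA, which transmits them over I2C. A snapshot of the CRC data
 * is taken under the event lock so the IRQ handler can keep updating it
 * while the TA call is in flight.
 */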
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
	struct secure_display_crtc_context *crtc_ctx;
	struct psp_context *psp;
	struct ta_securedisplay_cmd *securedisplay_cmd;
	struct drm_crtc *crtc;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector;
	uint8_t phy_inst;
	struct amdgpu_display_manager *dm;
	struct crc_data crc_cpy[MAX_CRC_WINDOW_NUM];
	unsigned long flags;
	uint8_t roi_idx = 0;
	int ret;
	int i;

	crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work);
	crtc = crtc_ctx->crtc;

	if (!crtc)
		return;

	psp = &drm_to_adev(crtc->dev)->psp;

	if (!psp->securedisplay_context.context.initialized) {
		DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n");
		return;
	}

	dm = &drm_to_adev(crtc->dev)->dm;
	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	if (!aconnector)
		return;

	mutex_lock(&crtc->dev->mode_config.mutex);
	if (!get_phy_id(dm, aconnector, &phy_inst)) {
		DRM_WARN("%s Can't find mapping phy id!", __func__);
		mutex_unlock(&crtc->dev->mode_config.mutex);
		return;
	}
	mutex_unlock(&crtc->dev->mode_config.mutex);

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	memcpy(crc_cpy, crtc_ctx->crc_info.crc, sizeof(struct crc_data) * MAX_CRC_WINDOW_NUM);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	/* need lock for multiple crtcs to use the command buffer */
	mutex_lock(&psp->securedisplay_context.mutex);
	/* PSP TA is expected to finish data transmission over I2C within the
	 * current frame, even if up to 4 crtcs request to send in this frame.
	 */
	if (dm->secure_display_ctx.support_mul_roi) {
		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
							TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);

		securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.phy_id = phy_inst;

		for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
			if (crc_cpy[i].crc_ready)
				roi_idx |= 1 << i;
		}
		securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.roi_idx = roi_idx;

		ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
	} else {
		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
							TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);

		securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;

		ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
	}

	if (!ret) {
		if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
			psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
	}

	mutex_unlock(&psp->securedisplay_context.mutex);
}
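/*
 * Deferred work: forward the latest ROI rectangles to the display
 * firmware via DC. Runs outside IRQ context because it must take
 * dc_lock and the mode_config mutex.
 */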
static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
	struct secure_display_crtc_context *crtc_ctx;
	struct amdgpu_display_manager *dm;
	struct drm_crtc *crtc;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector;
	struct crc_window roi_cpy[MAX_CRC_WINDOW_NUM];
	unsigned long flags;
	uint8_t phy_id;

	crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work);
	crtc = crtc_ctx->crtc;

	if (!crtc)
		return;

	dm = &drm_to_adev(crtc->dev)->dm;
	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector)
		return;

	mutex_lock(&crtc->dev->mode_config.mutex);
	if (!get_phy_id(dm, aconnector, &phy_id)) {
		DRM_WARN("%s Can't find mapping phy id!", __func__);
		mutex_unlock(&crtc->dev->mode_config.mutex);
		return;
	}
	mutex_unlock(&crtc->dev->mode_config.mutex);

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	memcpy(roi_cpy, crtc_ctx->roi, sizeof(struct crc_window) * MAX_CRC_WINDOW_NUM);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	mutex_lock(&dm->dc_lock);
	if (dm->secure_display_ctx.support_mul_roi)
		dc_stream_forward_multiple_crc_window(stream, roi_cpy,
			phy_id, false);
	else
		dc_stream_forward_crc_window(stream, &roi_cpy[0].rect,
			phy_id, false);
	mutex_unlock(&dm->dc_lock);
}
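/* Report whether any secure display CRC window is currently active. */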
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
{
	struct drm_device *drm_dev = crtc->dev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	bool ret = false;

	spin_lock_irq(&drm_dev->event_lock);
	ret = acrtc->dm_irq_params.crc_window_activated;
	spin_unlock_irq(&drm_dev->event_lock);

	return ret;
}
#endif
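/*
 * DRM verify_crc_source hook: reject unknown source names and report
 * that each CRC entry carries three values (one per color component).
 */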
int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
				 size_t *values_cnt)
{
	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);

	if (source < 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
				 src_name, crtc->index);
		return -EINVAL;
	}

	*values_cnt = 3;
	return 0;
}
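/*
 * Program DC for the requested CRC source: toggle CRTC CRC generation
 * and pick dithering/dynamic-expansion settings so the pipe output stays
 * deterministic while CRC capture is enabled.
 */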
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
					struct dm_crtc_state *dm_crtc_state,
					enum amdgpu_dm_pipe_crc_source source)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc_stream_state *stream_state = dm_crtc_state->stream;
	bool enable = amdgpu_dm_is_valid_crc_source(source);
	int ret = 0;

	/* Configuration will be deferred to stream enable. */
	if (!stream_state)
		return -EINVAL;

	mutex_lock(&adev->dm.dc_lock);

	/* For PSR1, check that the panel has exited PSR */
	if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
		amdgpu_dm_psr_wait_disable(stream_state);

	/* Enable or disable CRTC CRC generation */
	if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
		if (!dc_stream_configure_crc(stream_state->ctx->dc,
					     stream_state, NULL, enable, enable, 0, true)) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Configure dithering */
	if (!dm_need_crc_dither(source)) {
		dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_DISABLE);
	} else {
		dc_stream_set_dither_option(stream_state,
					    DITHER_OPTION_DEFAULT);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_AUTO);
	}

unlock:
	mutex_unlock(&adev->dm.dc_lock);

	return ret;
}
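/*
 * DRM set_crc_source hook, typically exercised through the generic CRC
 * debugfs interface, e.g. (illustrative paths):
 *
 *   echo crtc > /sys/kernel/debug/dri/0/crtc-0/crc/control
 *   cat /sys/kernel/debug/dri/0/crtc-0/crc/data
 *
 * Waits for pending commits, holds a vblank reference while capture is
 * active, and starts/stops DPRX CRC over the DP aux channel as needed.
 */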
int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct drm_crtc_commit *commit;
	struct dm_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	struct amdgpu_display_manager *dm = &adev->dm;
#endif
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct drm_dp_aux *aux = NULL;
	bool enable = false;
	bool enabled = false;
	int ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
				 src_name, crtc->index);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&crtc->mutex, NULL);
	if (ret)
		return ret;

	spin_lock(&crtc->commit_lock);
	commit = list_first_entry_or_null(&crtc->commit_list,
					  struct drm_crtc_commit, commit_entry);
	if (commit)
		drm_crtc_commit_get(commit);
	spin_unlock(&crtc->commit_lock);

	if (commit) {
		/*
		 * Need to wait for all outstanding programming to complete
		 * in commit tail since it can modify CRC related fields and
		 * hardware state. Since we're holding the CRTC lock we're
		 * guaranteed that no other commit work can be queued off
		 * before we modify the state below.
		 */
		ret = wait_for_completion_interruptible_timeout(
			&commit->hw_done, 10 * HZ);
		if (ret)
			goto cleanup;
	}

	enable = amdgpu_dm_is_valid_crc_source(source);
	crtc_state = to_dm_crtc_state(crtc->state);
	spin_lock_irq(&drm_dev->event_lock);
	cur_crc_src = acrtc->dm_irq_params.crc_src;
	spin_unlock_irq(&drm_dev->event_lock);

	/*
	 * USER REQ SRC | CURRENT SRC | BEHAVIOR
	 * -----------------------------
	 * None         | None        | Do nothing
	 * None         | CRTC        | Disable CRTC CRC, set default to dither
	 * None         | DPRX        | Disable DPRX CRC, need 'aux', set default to dither
	 * None         | CRTC DITHER | Disable CRTC CRC
	 * None         | DPRX DITHER | Disable DPRX CRC, need 'aux'
	 * CRTC         | XXXX        | Enable CRTC CRC, no dither
	 * DPRX         | XXXX        | Enable DPRX CRC, need 'aux', no dither
	 * CRTC DITHER  | XXXX        | Enable CRTC CRC, set dither
	 * DPRX DITHER  | XXXX        | Enable DPRX CRC, need 'aux', set dither
	 */
	if (dm_is_crc_source_dprx(source) ||
	    (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE &&
	     dm_is_crc_source_dprx(cur_crc_src))) {
		struct amdgpu_dm_connector *aconn = NULL;
		struct drm_connector *connector;
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (!connector->state || connector->state->crtc != crtc)
				continue;

			if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
				continue;

			aconn = to_amdgpu_dm_connector(connector);
			break;
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!aconn) {
			DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index);
			ret = -EINVAL;
			goto cleanup;
		}

		aux = (aconn->mst_output_port) ? &aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux;

		if (!aux) {
			DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
			ret = -EINVAL;
			goto cleanup;
		}

		if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
				(aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
			DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/*
	 * Reading the CRC requires the vblank interrupt handler to be
	 * enabled. Keep a reference until CRC capture stops.
	 */
	enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
	if (!enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			goto cleanup;
	}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* Reset secure_display when we change crc source from debugfs */
	amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
#endif

	if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
		ret = -EINVAL;
		goto cleanup;
	}

	if (!enabled && enable) {
		if (dm_is_crc_source_dprx(source)) {
			if (drm_dp_start_crc(aux, crtc)) {
				DRM_DEBUG_DRIVER("dp start crc failed\n");
				ret = -EINVAL;
				goto cleanup;
			}
		}
	} else if (enabled && !enable) {
		drm_crtc_vblank_put(crtc);
		if (dm_is_crc_source_dprx(source)) {
			if (drm_dp_stop_crc(aux)) {
				DRM_DEBUG_DRIVER("dp stop crc failed\n");
				ret = -EINVAL;
				goto cleanup;
			}
		}
	}

	spin_lock_irq(&drm_dev->event_lock);
	acrtc->dm_irq_params.crc_src = source;
	spin_unlock_irq(&drm_dev->event_lock);

	/* Reset crc_skipped on dm state */
	crtc_state->crc_skip_count = 0;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* Initialize phy id mapping table for secure display */
	if (dm->secure_display_ctx.op_mode == LEGACY_MODE &&
		!dm->secure_display_ctx.phy_mapping_updated)
		update_phy_id_mapping(adev);
#endif

cleanup:
	if (commit)
		drm_crtc_commit_put(commit);

	drm_modeset_unlock(&crtc->mutex);

	return ret;
}

/**
 * amdgpu_dm_crtc_handle_crc_irq: Report to DRM the CRC on given CRTC.
 * @crtc: DRM CRTC object.
 *
 * This function should be called at the end of a vblank, when the fb has been
 * fully processed through the pipe.
 */
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
{
	struct dm_crtc_state *crtc_state;
	struct dc_stream_state *stream_state;
	struct drm_device *drm_dev = NULL;
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct amdgpu_crtc *acrtc = NULL;
	uint32_t crcs[3];
	unsigned long flags;

	if (crtc == NULL)
		return;

	crtc_state = to_dm_crtc_state(crtc->state);
	stream_state = crtc_state->stream;
	acrtc = to_amdgpu_crtc(crtc);
	drm_dev = crtc->dev;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	cur_crc_src = acrtc->dm_irq_params.crc_src;
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	/* Early return if CRC capture is not enabled. */
	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
		return;

	/*
	 * Since flipping and crc enablement happen asynchronously, we - more
	 * often than not - will be returning an 'uncooked' crc on first frame.
	 * Probably because hw isn't ready yet. For added security, skip the
	 * first two CRC values.
	 */
	if (crtc_state->crc_skip_count < 2) {
		crtc_state->crc_skip_count += 1;
		return;
	}

	if (dm_is_crc_source_crtc(cur_crc_src)) {
		if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0,
				       &crcs[0], &crcs[1], &crcs[2]))
			return;

		drm_crtc_add_crc_entry(crtc, true,
				       drm_crtc_accurate_vblank_count(crtc), crcs);
	}
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
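/*
 * Per-vblank secure display handler. For every enabled ROI window it
 * either reprograms the window in hardware or reads back the window CRC,
 * then kicks the deferred work that forwards the result to the display
 * firmware or to the PSP TA, depending on the operating mode.
 */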
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
{
	struct drm_device *drm_dev = NULL;
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_device *adev = NULL;
	struct secure_display_crtc_context *crtc_ctx = NULL;
	bool reset_crc_frame_count[MAX_CRC_WINDOW_NUM] = {false};
	uint32_t crc_r[MAX_CRC_WINDOW_NUM] = {0};
	uint32_t crc_g[MAX_CRC_WINDOW_NUM] = {0};
	uint32_t crc_b[MAX_CRC_WINDOW_NUM] = {0};
	unsigned long flags1;
	bool forward_roi_change = false;
	bool notify_ta = false;
	bool all_crc_ready = true;
	struct dc_stream_state *stream_state;
	int i;

	if (crtc == NULL)
		return;

	acrtc = to_amdgpu_crtc(crtc);
	adev = drm_to_adev(crtc->dev);
	drm_dev = crtc->dev;
	stream_state = to_dm_crtc_state(crtc->state)->stream;

	spin_lock_irqsave(&drm_dev->event_lock, flags1);
	cur_crc_src = acrtc->dm_irq_params.crc_src;

	/* Early return if CRC capture is not enabled. */
	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
	    !dm_is_crc_source_crtc(cur_crc_src)) {
		spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
		return;
	}

	if (!acrtc->dm_irq_params.crc_window_activated) {
		spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
		return;
	}

	crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id];
	if (WARN_ON(crtc_ctx->crtc != crtc)) {
		/* The crtc was set when creating the secure_display_crtc_context
		 * and isn't expected to change here.
		 */
		crtc_ctx->crtc = crtc;
	}

	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		struct crc_params crc_window = {
			.windowa_x_start = acrtc->dm_irq_params.window_param[i].x_start,
			.windowa_y_start = acrtc->dm_irq_params.window_param[i].y_start,
			.windowa_x_end = acrtc->dm_irq_params.window_param[i].x_end,
			.windowa_y_end = acrtc->dm_irq_params.window_param[i].y_end,
			.windowb_x_start = acrtc->dm_irq_params.window_param[i].x_start,
			.windowb_y_start = acrtc->dm_irq_params.window_param[i].y_start,
			.windowb_x_end = acrtc->dm_irq_params.window_param[i].x_end,
			.windowb_y_end = acrtc->dm_irq_params.window_param[i].y_end,
		};

		crtc_ctx->roi[i].enable = acrtc->dm_irq_params.window_param[i].enable;

		if (!acrtc->dm_irq_params.window_param[i].enable) {
			crtc_ctx->crc_info.crc[i].crc_ready = false;
			continue;
		}

		if (acrtc->dm_irq_params.window_param[i].skip_frame_cnt) {
			acrtc->dm_irq_params.window_param[i].skip_frame_cnt -= 1;
			crtc_ctx->crc_info.crc[i].crc_ready = false;
			continue;
		}

		if (acrtc->dm_irq_params.window_param[i].update_win) {
			crtc_ctx->roi[i].rect.x = crc_window.windowa_x_start;
			crtc_ctx->roi[i].rect.y = crc_window.windowa_y_start;
			crtc_ctx->roi[i].rect.width = crc_window.windowa_x_end -
						crc_window.windowa_x_start;
			crtc_ctx->roi[i].rect.height = crc_window.windowa_y_end -
						crc_window.windowa_y_start;

			if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
				/* forward task to dmub to update ROI */
				forward_roi_change = true;
			else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
				/* update ROI via dm */
				dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
					&crc_window, true, true, i, false);

			reset_crc_frame_count[i] = true;

			acrtc->dm_irq_params.window_param[i].update_win = false;

			/* Statically skip 1 frame, because we may need to wait for
			 * the following before sending the ROI to dmub:
			 * 1. We defer the work by using the system workqueue.
			 * 2. We may need to wait for dc_lock before accessing dmub.
			 */
			acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 1;
			crtc_ctx->crc_info.crc[i].crc_ready = false;
		} else {
			if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, i,
						&crc_r[i], &crc_g[i], &crc_b[i]))
				DRM_ERROR("Secure Display: failed to get crc from engine %d\n", i);

			if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
				/* forward task to psp to read ROI/CRC and output via I2C */
				notify_ta = true;
			else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
				/* Avoid the ROI window being changed; keep overwriting. */
				dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
						&crc_window, true, true, i, false);

			/* crc ready for psp to read out */
			crtc_ctx->crc_info.crc[i].crc_ready = true;
		}
	}

	spin_unlock_irqrestore(&drm_dev->event_lock, flags1);

	if (forward_roi_change)
		schedule_work(&crtc_ctx->forward_roi_work);

	if (notify_ta)
		schedule_work(&crtc_ctx->notify_ta_work);

	spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags1);
	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		crtc_ctx->crc_info.crc[i].crc_R = crc_r[i];
		crtc_ctx->crc_info.crc[i].crc_G = crc_g[i];
		crtc_ctx->crc_info.crc[i].crc_B = crc_b[i];

		if (!crtc_ctx->roi[i].enable) {
			crtc_ctx->crc_info.crc[i].frame_count = 0;
			continue;
		}

		if (!crtc_ctx->crc_info.crc[i].crc_ready)
			all_crc_ready = false;

		if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX)
			/* Reset the reference frame count after the user updates
			 * the ROI or it reaches the maximum value.
			 */
			crtc_ctx->crc_info.crc[i].frame_count = 0;
		else
			crtc_ctx->crc_info.crc[i].frame_count += 1;
	}
	spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);

	if (all_crc_ready)
		complete_all(&crtc_ctx->crc_info.completion);
}
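/*
 * Allocate one secure display context per CRTC and initialize the
 * deferred work items used to talk to the display firmware and the
 * PSP TA.
 */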
void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
{
	struct secure_display_crtc_context *crtc_ctx = NULL;
	int i;

	crtc_ctx = kcalloc(adev->mode_info.num_crtc,
				      sizeof(struct secure_display_crtc_context),
				      GFP_KERNEL);

	if (!crtc_ctx) {
		adev->dm.secure_display_ctx.crtc_ctx = NULL;
		return;
	}

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		INIT_WORK(&crtc_ctx[i].forward_roi_work, amdgpu_dm_forward_crc_window);
		INIT_WORK(&crtc_ctx[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
		crtc_ctx[i].crtc = &adev->mode_info.crtcs[i]->base;
		spin_lock_init(&crtc_ctx[i].crc_info.lock);
	}

	adev->dm.secure_display_ctx.crtc_ctx = crtc_ctx;

	adev->dm.secure_display_ctx.op_mode = DISPLAY_CRC_MODE;
}
#endif