xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision fdcf62fbfb288f4cb050c02c5ab9bc58fc53a872)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 
98 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
100 
101 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
103 
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
106 
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
109 
110 /**
111  * DOC: overview
112  *
113  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
114  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
115  * requests into DC requests, and DC responses into DRM responses.
116  *
117  * The root control structure is &struct amdgpu_display_manager.
118  */
119 
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
123 
124 /*
125  * initializes drm_device display related structures, based on the information
126  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
127  * drm_encoder, drm_mode_config
128  *
129  * Returns 0 on success
130  */
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
132 /* removes and deallocates the drm structures, created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
134 
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136 				struct drm_plane *plane,
137 				unsigned long possible_crtcs,
138 				const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140 			       struct drm_plane *plane,
141 			       uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
144 				    uint32_t link_index,
145 				    struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147 				  struct amdgpu_encoder *aencoder,
148 				  uint32_t link_index);
149 
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
151 
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153 				   struct drm_atomic_state *state,
154 				   bool nonblock);
155 
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
157 
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159 				  struct drm_atomic_state *state);
160 
161 static void handle_cursor_update(struct drm_plane *plane,
162 				 struct drm_plane_state *old_plane_state);
163 
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
168 
169 
170 /*
171  * dm_vblank_get_counter
172  *
173  * @brief
174  * Get counter for number of vertical blanks
175  *
176  * @param
177  * struct amdgpu_device *adev - [in] desired amdgpu device
178  * int disp_idx - [in] which CRTC to get the counter from
179  *
180  * @return
181  * Counter for vertical blanks
182  */
183 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
184 {
185 	if (crtc >= adev->mode_info.num_crtc)
186 		return 0;
187 	else {
188 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
189 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
190 				acrtc->base.state);
191 
192 
193 		if (acrtc_state->stream == NULL) {
194 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
195 				  crtc);
196 			return 0;
197 		}
198 
199 		return dc_stream_get_vblank_counter(acrtc_state->stream);
200 	}
201 }
202 
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204 				  u32 *vbl, u32 *position)
205 {
206 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
207 
208 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
209 		return -EINVAL;
210 	else {
211 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
213 						acrtc->base.state);
214 
215 		if (acrtc_state->stream ==  NULL) {
216 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
217 				  crtc);
218 			return 0;
219 		}
220 
221 		/*
222 		 * TODO rework base driver to use values directly.
223 		 * for now parse it back into reg-format
224 		 */
225 		dc_stream_get_scanoutpos(acrtc_state->stream,
226 					 &v_blank_start,
227 					 &v_blank_end,
228 					 &h_position,
229 					 &v_position);
230 
231 		*position = v_position | (h_position << 16);
232 		*vbl = v_blank_start | (v_blank_end << 16);
233 	}
234 
235 	return 0;
236 }
237 
/*
 * amd_ip_funcs.is_idle hook. Idle detection is not implemented for the DM
 * IP block, so it always reports idle.
 */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
243 
/*
 * amd_ip_funcs.wait_for_idle hook. There is nothing to wait for yet, so
 * success is reported immediately.
 */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
249 
/*
 * amd_ip_funcs.check_soft_reset hook. The DM block never requests a soft
 * reset.
 */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
254 
/*
 * amd_ip_funcs.soft_reset hook. Soft reset is not implemented; report
 * success without touching hardware.
 */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
260 
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
263 		     int otg_inst)
264 {
265 	struct drm_device *dev = adev->ddev;
266 	struct drm_crtc *crtc;
267 	struct amdgpu_crtc *amdgpu_crtc;
268 
269 	if (otg_inst == -1) {
270 		WARN_ON(1);
271 		return adev->mode_info.crtcs[0];
272 	}
273 
274 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275 		amdgpu_crtc = to_amdgpu_crtc(crtc);
276 
277 		if (amdgpu_crtc->otg_inst == otg_inst)
278 			return amdgpu_crtc;
279 	}
280 
281 	return NULL;
282 }
283 
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
285 {
286 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
288 }
289 
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common_irq_params carrying the amdgpu device and the
 *                    IRQ source that fired
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed. In VRR mode, event delivery may
 * instead be deferred to the vblank handler (see the front-porch branch
 * below).
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	/* Translate the IRQ source back to the CRTC (OTG instance) it came from. */
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	/* event_lock serializes pflip_status/event against flip submission. */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	/* A completion IRQ without a submitted flip is spurious; ignore it. */
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	/* A submitted flip is expected to carry a pending event. */
	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
396 
/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: common_irq_params carrying the amdgpu device and the
 *                    IRQ source that fired
 *
 * VUPDATE fires after the end of front-porch. In VRR mode this is where
 * core vblank handling runs (so timestamps are valid) and where BTR
 * processing is done for pre-DCE12 ASICs.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	/* Translate the IRQ source back to the CRTC (OTG instance) it came from. */
	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				/* event_lock protects vrr_params updates. */
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}
441 
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler. Also performs CRC capture, BTR handling for Vega and
 * newer ASICs, and a fallback pageflip-completion path for the case
 * where no planes are active (see below).
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	/* Translate the IRQ source back to the CRTC (OTG instance) it came from. */
	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
			 amdgpu_dm_vrr_active(acrtc_state),
			 acrtc_state->active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	/* event_lock protects vrr_params and the pflip event below. */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
521 
522 static int dm_set_clockgating_state(void *handle,
523 		  enum amd_clockgating_state state)
524 {
525 	return 0;
526 }
527 
528 static int dm_set_powergating_state(void *handle,
529 		  enum amd_powergating_state state)
530 {
531 	return 0;
532 }
533 
534 /* Prototypes of private functions */
535 static int dm_early_init(void* handle);
536 
537 /* Allocate memory for FBC compressed data  */
538 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
539 {
540 	struct drm_device *dev = connector->dev;
541 	struct amdgpu_device *adev = dev->dev_private;
542 	struct dm_comressor_info *compressor = &adev->dm.compressor;
543 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
544 	struct drm_display_mode *mode;
545 	unsigned long max_size = 0;
546 
547 	if (adev->dm.dc->fbc_compressor == NULL)
548 		return;
549 
550 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
551 		return;
552 
553 	if (compressor->bo_ptr)
554 		return;
555 
556 
557 	list_for_each_entry(mode, &connector->modes, head) {
558 		if (max_size < mode->htotal * mode->vtotal)
559 			max_size = mode->htotal * mode->vtotal;
560 	}
561 
562 	if (max_size) {
563 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
564 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
565 			    &compressor->gpu_addr, &compressor->cpu_addr);
566 
567 		if (r)
568 			DRM_ERROR("DM: Failed to initialize FBC\n");
569 		else {
570 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
571 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
572 		}
573 
574 	}
575 
576 }
577 
578 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
579 					  int pipe, bool *enabled,
580 					  unsigned char *buf, int max_bytes)
581 {
582 	struct drm_device *dev = dev_get_drvdata(kdev);
583 	struct amdgpu_device *adev = dev->dev_private;
584 	struct drm_connector *connector;
585 	struct drm_connector_list_iter conn_iter;
586 	struct amdgpu_dm_connector *aconnector;
587 	int ret = 0;
588 
589 	*enabled = false;
590 
591 	mutex_lock(&adev->dm.audio_lock);
592 
593 	drm_connector_list_iter_begin(dev, &conn_iter);
594 	drm_for_each_connector_iter(connector, &conn_iter) {
595 		aconnector = to_amdgpu_dm_connector(connector);
596 		if (aconnector->audio_inst != port)
597 			continue;
598 
599 		*enabled = true;
600 		ret = drm_eld_size(connector->eld);
601 		memcpy(buf, connector->eld, min(max_bytes, ret));
602 
603 		break;
604 	}
605 	drm_connector_list_iter_end(&conn_iter);
606 
607 	mutex_unlock(&adev->dm.audio_lock);
608 
609 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
610 
611 	return ret;
612 }
613 
/* Ops exposed to the HDA driver through the audio component framework. */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
617 
618 static int amdgpu_dm_audio_component_bind(struct device *kdev,
619 				       struct device *hda_kdev, void *data)
620 {
621 	struct drm_device *dev = dev_get_drvdata(kdev);
622 	struct amdgpu_device *adev = dev->dev_private;
623 	struct drm_audio_component *acomp = data;
624 
625 	acomp->ops = &amdgpu_dm_audio_component_ops;
626 	acomp->dev = kdev;
627 	adev->dm.audio_component = acomp;
628 
629 	return 0;
630 }
631 
632 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
633 					  struct device *hda_kdev, void *data)
634 {
635 	struct drm_device *dev = dev_get_drvdata(kdev);
636 	struct amdgpu_device *adev = dev->dev_private;
637 	struct drm_audio_component *acomp = data;
638 
639 	acomp->ops = NULL;
640 	acomp->dev = NULL;
641 	adev->dm.audio_component = NULL;
642 }
643 
/* bind/unbind callbacks registered with the component framework. */
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
648 
649 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
650 {
651 	int i, ret;
652 
653 	if (!amdgpu_audio)
654 		return 0;
655 
656 	adev->mode_info.audio.enabled = true;
657 
658 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
659 
660 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
661 		adev->mode_info.audio.pin[i].channels = -1;
662 		adev->mode_info.audio.pin[i].rate = -1;
663 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
664 		adev->mode_info.audio.pin[i].status_bits = 0;
665 		adev->mode_info.audio.pin[i].category_code = 0;
666 		adev->mode_info.audio.pin[i].connected = false;
667 		adev->mode_info.audio.pin[i].id =
668 			adev->dm.dc->res_pool->audios[i]->inst;
669 		adev->mode_info.audio.pin[i].offset = 0;
670 	}
671 
672 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
673 	if (ret < 0)
674 		return ret;
675 
676 	adev->dm.audio_registered = true;
677 
678 	return 0;
679 }
680 
681 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
682 {
683 	if (!amdgpu_audio)
684 		return;
685 
686 	if (!adev->mode_info.audio.enabled)
687 		return;
688 
689 	if (adev->dm.audio_registered) {
690 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
691 		adev->dm.audio_registered = false;
692 	}
693 
694 	/* TODO: Disable audio? */
695 
696 	adev->mode_info.audio.enabled = false;
697 }
698 
699 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
700 {
701 	struct drm_audio_component *acomp = adev->dm.audio_component;
702 
703 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
704 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
705 
706 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
707 						 pin, -1);
708 	}
709 }
710 
/**
 * dm_dmub_hw_init() - Load DMUB firmware windows and bring up the DMUB service
 * @adev: amdgpu device
 *
 * Copies the firmware instruction/data sections and the VBIOS image into the
 * framebuffer windows, clears the mailbox/tracebuffer/fw-state windows, and
 * initializes the DMUB hardware through the DMUB service.
 *
 * Return: 0 on success (including ASICs without DMUB support), -EINVAL when
 * prerequisites are missing or hardware init fails, -ENOMEM when the DC DMUB
 * server cannot be allocated.
 */
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	/* Instruction section starts after the PSP header. */
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
838 
/**
 * amdgpu_dm_init() - Initialize the display manager
 * @adev: amdgpu device
 *
 * Sets up DM IRQ support, creates the Display Core (DC) instance with
 * ASIC-specific init data, initializes DMUB, freesync, color management,
 * HDCP (when configured) and the DRM-facing display structures.
 *
 * Return: 0 on success, -EINVAL on any failure (the error path runs
 * amdgpu_dm_fini() to unwind partial initialization).
 *
 * NOTE(review): the error path calls amdgpu_dm_fini() even when dc_create()
 * failed (adev->dm.dc == NULL) — verify that fini tolerates a NULL dc.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	/* Describe the ASIC to DC. */
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* APUs and Renoir-class parts get GPU VM support for display. */
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	/* Translate module-parameter feature mask bits into DC init flags. */
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	/* Apply debug-mask overrides from the module parameter. */
	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	/* Freesync failure is logged but not fatal. */
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
997 
998 static void amdgpu_dm_fini(struct amdgpu_device *adev)
999 {
1000 	amdgpu_dm_audio_fini(adev);
1001 
1002 	amdgpu_dm_destroy_drm_device(&adev->dm);
1003 
1004 #ifdef CONFIG_DRM_AMD_DC_HDCP
1005 	if (adev->dm.hdcp_workqueue) {
1006 		hdcp_destroy(adev->dm.hdcp_workqueue);
1007 		adev->dm.hdcp_workqueue = NULL;
1008 	}
1009 
1010 	if (adev->dm.dc)
1011 		dc_deinit_callbacks(adev->dm.dc);
1012 #endif
1013 	if (adev->dm.dc->ctx->dmub_srv) {
1014 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1015 		adev->dm.dc->ctx->dmub_srv = NULL;
1016 	}
1017 
1018 	if (adev->dm.dmub_bo)
1019 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1020 				      &adev->dm.dmub_bo_gpu_addr,
1021 				      &adev->dm.dmub_bo_cpu_addr);
1022 
1023 	/* DC Destroy TODO: Replace destroy DAL */
1024 	if (adev->dm.dc)
1025 		dc_destroy(&adev->dm.dc);
1026 	/*
1027 	 * TODO: pageflip, vlank interrupt
1028 	 *
1029 	 * amdgpu_dm_irq_fini(adev);
1030 	 */
1031 
1032 	if (adev->dm.cgs_device) {
1033 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1034 		adev->dm.cgs_device = NULL;
1035 	}
1036 	if (adev->dm.freesync_module) {
1037 		mod_freesync_destroy(adev->dm.freesync_module);
1038 		adev->dm.freesync_module = NULL;
1039 	}
1040 
1041 	mutex_destroy(&adev->dm.audio_lock);
1042 	mutex_destroy(&adev->dm.dc_lock);
1043 
1044 	return;
1045 }
1046 
1047 static int load_dmcu_fw(struct amdgpu_device *adev)
1048 {
1049 	const char *fw_name_dmcu = NULL;
1050 	int r;
1051 	const struct dmcu_firmware_header_v1_0 *hdr;
1052 
1053 	switch(adev->asic_type) {
1054 	case CHIP_BONAIRE:
1055 	case CHIP_HAWAII:
1056 	case CHIP_KAVERI:
1057 	case CHIP_KABINI:
1058 	case CHIP_MULLINS:
1059 	case CHIP_TONGA:
1060 	case CHIP_FIJI:
1061 	case CHIP_CARRIZO:
1062 	case CHIP_STONEY:
1063 	case CHIP_POLARIS11:
1064 	case CHIP_POLARIS10:
1065 	case CHIP_POLARIS12:
1066 	case CHIP_VEGAM:
1067 	case CHIP_VEGA10:
1068 	case CHIP_VEGA12:
1069 	case CHIP_VEGA20:
1070 	case CHIP_NAVI10:
1071 	case CHIP_NAVI14:
1072 	case CHIP_RENOIR:
1073 		return 0;
1074 	case CHIP_NAVI12:
1075 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1076 		break;
1077 	case CHIP_RAVEN:
1078 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1079 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1080 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1081 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1082 		else
1083 			return 0;
1084 		break;
1085 	default:
1086 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1087 		return -EINVAL;
1088 	}
1089 
1090 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1091 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1092 		return 0;
1093 	}
1094 
1095 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1096 	if (r == -ENOENT) {
1097 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1098 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1099 		adev->dm.fw_dmcu = NULL;
1100 		return 0;
1101 	}
1102 	if (r) {
1103 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1104 			fw_name_dmcu);
1105 		return r;
1106 	}
1107 
1108 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1109 	if (r) {
1110 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1111 			fw_name_dmcu);
1112 		release_firmware(adev->dm.fw_dmcu);
1113 		adev->dm.fw_dmcu = NULL;
1114 		return r;
1115 	}
1116 
1117 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1118 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1119 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1120 	adev->firmware.fw_size +=
1121 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1122 
1123 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1124 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1125 	adev->firmware.fw_size +=
1126 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1127 
1128 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1129 
1130 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1131 
1132 	return 0;
1133 }
1134 
1135 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1136 {
1137 	struct amdgpu_device *adev = ctx;
1138 
1139 	return dm_read_reg(adev->dm.dc->ctx, address);
1140 }
1141 
1142 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1143 				     uint32_t value)
1144 {
1145 	struct amdgpu_device *adev = ctx;
1146 
1147 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1148 }
1149 
1150 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1151 {
1152 	struct dmub_srv_create_params create_params;
1153 	struct dmub_srv_region_params region_params;
1154 	struct dmub_srv_region_info region_info;
1155 	struct dmub_srv_fb_params fb_params;
1156 	struct dmub_srv_fb_info *fb_info;
1157 	struct dmub_srv *dmub_srv;
1158 	const struct dmcub_firmware_header_v1_0 *hdr;
1159 	const char *fw_name_dmub;
1160 	enum dmub_asic dmub_asic;
1161 	enum dmub_status status;
1162 	int r;
1163 
1164 	switch (adev->asic_type) {
1165 	case CHIP_RENOIR:
1166 		dmub_asic = DMUB_ASIC_DCN21;
1167 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1168 		break;
1169 
1170 	default:
1171 		/* ASIC doesn't support DMUB. */
1172 		return 0;
1173 	}
1174 
1175 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1176 	if (r) {
1177 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1178 		return 0;
1179 	}
1180 
1181 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1182 	if (r) {
1183 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1184 		return 0;
1185 	}
1186 
1187 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1188 
1189 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1190 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1191 			AMDGPU_UCODE_ID_DMCUB;
1192 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1193 			adev->dm.dmub_fw;
1194 		adev->firmware.fw_size +=
1195 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1196 
1197 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1198 			 adev->dm.dmcub_fw_version);
1199 	}
1200 
1201 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1202 
1203 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1204 	dmub_srv = adev->dm.dmub_srv;
1205 
1206 	if (!dmub_srv) {
1207 		DRM_ERROR("Failed to allocate DMUB service!\n");
1208 		return -ENOMEM;
1209 	}
1210 
1211 	memset(&create_params, 0, sizeof(create_params));
1212 	create_params.user_ctx = adev;
1213 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1214 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1215 	create_params.asic = dmub_asic;
1216 
1217 	/* Create the DMUB service. */
1218 	status = dmub_srv_create(dmub_srv, &create_params);
1219 	if (status != DMUB_STATUS_OK) {
1220 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1221 		return -EINVAL;
1222 	}
1223 
1224 	/* Calculate the size of all the regions for the DMUB service. */
1225 	memset(&region_params, 0, sizeof(region_params));
1226 
1227 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1228 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1229 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1230 	region_params.vbios_size = adev->bios_size;
1231 	region_params.fw_bss_data =
1232 		adev->dm.dmub_fw->data +
1233 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1234 		le32_to_cpu(hdr->inst_const_bytes);
1235 	region_params.fw_inst_const =
1236 		adev->dm.dmub_fw->data +
1237 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1238 		PSP_HEADER_BYTES;
1239 
1240 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1241 					   &region_info);
1242 
1243 	if (status != DMUB_STATUS_OK) {
1244 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1245 		return -EINVAL;
1246 	}
1247 
1248 	/*
1249 	 * Allocate a framebuffer based on the total size of all the regions.
1250 	 * TODO: Move this into GART.
1251 	 */
1252 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1253 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1254 				    &adev->dm.dmub_bo_gpu_addr,
1255 				    &adev->dm.dmub_bo_cpu_addr);
1256 	if (r)
1257 		return r;
1258 
1259 	/* Rebase the regions on the framebuffer address. */
1260 	memset(&fb_params, 0, sizeof(fb_params));
1261 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1262 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1263 	fb_params.region_info = &region_info;
1264 
1265 	adev->dm.dmub_fb_info =
1266 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1267 	fb_info = adev->dm.dmub_fb_info;
1268 
1269 	if (!fb_info) {
1270 		DRM_ERROR(
1271 			"Failed to allocate framebuffer info for DMUB service!\n");
1272 		return -ENOMEM;
1273 	}
1274 
1275 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1276 	if (status != DMUB_STATUS_OK) {
1277 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1278 		return -EINVAL;
1279 	}
1280 
1281 	return 0;
1282 }
1283 
/* IP-block sw_init hook: set up the DMUB service, then the DMCU firmware. */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = dm_dmub_sw_init(adev);

	if (r)
		return r;

	return load_dmcu_fw(adev);
}
1295 
1296 static int dm_sw_fini(void *handle)
1297 {
1298 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1299 
1300 	kfree(adev->dm.dmub_fb_info);
1301 	adev->dm.dmub_fb_info = NULL;
1302 
1303 	if (adev->dm.dmub_srv) {
1304 		dmub_srv_destroy(adev->dm.dmub_srv);
1305 		adev->dm.dmub_srv = NULL;
1306 	}
1307 
1308 	if (adev->dm.dmub_fw) {
1309 		release_firmware(adev->dm.dmub_fw);
1310 		adev->dm.dmub_fw = NULL;
1311 	}
1312 
1313 	if(adev->dm.fw_dmcu) {
1314 		release_firmware(adev->dm.fw_dmcu);
1315 		adev->dm.fw_dmcu = NULL;
1316 	}
1317 
1318 	return 0;
1319 }
1320 
1321 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1322 {
1323 	struct amdgpu_dm_connector *aconnector;
1324 	struct drm_connector *connector;
1325 	struct drm_connector_list_iter iter;
1326 	int ret = 0;
1327 
1328 	drm_connector_list_iter_begin(dev, &iter);
1329 	drm_for_each_connector_iter(connector, &iter) {
1330 		aconnector = to_amdgpu_dm_connector(connector);
1331 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1332 		    aconnector->mst_mgr.aux) {
1333 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1334 					 aconnector,
1335 					 aconnector->base.base.id);
1336 
1337 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1338 			if (ret < 0) {
1339 				DRM_ERROR("DM_MST: Failed to start MST\n");
1340 				aconnector->dc_link->type =
1341 					dc_connection_single;
1342 				break;
1343 			}
1344 		}
1345 	}
1346 	drm_connector_list_iter_end(&iter);
1347 
1348 	return ret;
1349 }
1350 
/*
 * IP-block late_init hook: program the DMCU's ABM IRAM (backlight ramping
 * and LUT parameters), then start MST topology management on all
 * connectors. Without DMCU firmware only the MST step runs.
 */
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = false;

	/* No DMCU firmware loaded: nothing to program, just bring up MST. */
	if (!adev->dm.fw_dmcu)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

	/* Identity (linear) backlight transfer LUT over 16 points. */
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	/* NOTE(review): params is not zero-initialized; fields other than
	 * those assigned below are indeterminate — confirm dmcu_load_iram()
	 * reads only these members. */
	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction,  Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}
1390 
1391 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1392 {
1393 	struct amdgpu_dm_connector *aconnector;
1394 	struct drm_connector *connector;
1395 	struct drm_connector_list_iter iter;
1396 	struct drm_dp_mst_topology_mgr *mgr;
1397 	int ret;
1398 	bool need_hotplug = false;
1399 
1400 	drm_connector_list_iter_begin(dev, &iter);
1401 	drm_for_each_connector_iter(connector, &iter) {
1402 		aconnector = to_amdgpu_dm_connector(connector);
1403 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1404 		    aconnector->mst_port)
1405 			continue;
1406 
1407 		mgr = &aconnector->mst_mgr;
1408 
1409 		if (suspend) {
1410 			drm_dp_mst_topology_mgr_suspend(mgr);
1411 		} else {
1412 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1413 			if (ret < 0) {
1414 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1415 				need_hotplug = true;
1416 			}
1417 		}
1418 	}
1419 	drm_connector_list_iter_end(&iter);
1420 
1421 	if (need_hotplug)
1422 		drm_kms_helper_hotplug_event(dev);
1423 }
1424 
1425 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1426 {
1427 	struct smu_context *smu = &adev->smu;
1428 	int ret = 0;
1429 
1430 	if (!is_support_sw_smu(adev))
1431 		return 0;
1432 
1433 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1434 	 * on window driver dc implementation.
1435 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1436 	 * should be passed to smu during boot up and resume from s3.
1437 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1438 	 * dcn20_resource_construct
1439 	 * then call pplib functions below to pass the settings to smu:
1440 	 * smu_set_watermarks_for_clock_ranges
1441 	 * smu_set_watermarks_table
1442 	 * navi10_set_watermarks_table
1443 	 * smu_write_watermarks_table
1444 	 *
1445 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1446 	 * dc has implemented different flow for window driver:
1447 	 * dc_hardware_init / dc_set_power_state
1448 	 * dcn10_init_hw
1449 	 * notify_wm_ranges
1450 	 * set_wm_ranges
1451 	 * -- Linux
1452 	 * smu_set_watermarks_for_clock_ranges
1453 	 * renoir_set_watermarks_table
1454 	 * smu_write_watermarks_table
1455 	 *
1456 	 * For Linux,
1457 	 * dc_hardware_init -> amdgpu_dm_init
1458 	 * dc_set_power_state --> dm_resume
1459 	 *
1460 	 * therefore, this function apply to navi10/12/14 but not Renoir
1461 	 * *
1462 	 */
1463 	switch(adev->asic_type) {
1464 	case CHIP_NAVI10:
1465 	case CHIP_NAVI14:
1466 	case CHIP_NAVI12:
1467 		break;
1468 	default:
1469 		return 0;
1470 	}
1471 
1472 	mutex_lock(&smu->mutex);
1473 
1474 	/* pass data to smu controller */
1475 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1476 			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1477 		ret = smu_write_watermarks_table(smu);
1478 
1479 		if (ret) {
1480 			mutex_unlock(&smu->mutex);
1481 			DRM_ERROR("Failed to update WMTABLE!\n");
1482 			return ret;
1483 		}
1484 		smu->watermarks_bitmap |= WATERMARKS_LOADED;
1485 	}
1486 
1487 	mutex_unlock(&smu->mutex);
1488 
1489 	return 0;
1490 }
1491 
1492 /**
1493  * dm_hw_init() - Initialize DC device
1494  * @handle: The base driver device containing the amdgpu_dm device.
1495  *
1496  * Initialize the &struct amdgpu_display_manager device. This involves calling
1497  * the initializers of each DM component, then populating the struct with them.
1498  *
1499  * Although the function implies hardware initialization, both hardware and
1500  * software are initialized here. Splitting them out to their relevant init
1501  * hooks is a future TODO item.
1502  *
1503  * Some notable things that are initialized here:
1504  *
1505  * - Display Core, both software and hardware
1506  * - DC modules that we need (freesync and color management)
1507  * - DRM software states
1508  * - Interrupt sources and handlers
1509  * - Vblank support
1510  * - Debug FS entries, if enabled
1511  */
1512 static int dm_hw_init(void *handle)
1513 {
1514 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1515 	/* Create DAL display manager */
1516 	amdgpu_dm_init(adev);
1517 	amdgpu_dm_hpd_init(adev);
1518 
1519 	return 0;
1520 }
1521 
1522 /**
1523  * dm_hw_fini() - Teardown DC device
1524  * @handle: The base driver device containing the amdgpu_dm device.
1525  *
1526  * Teardown components within &struct amdgpu_display_manager that require
1527  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1528  * were loaded. Also flush IRQ workqueues and disable them.
1529  */
1530 static int dm_hw_fini(void *handle)
1531 {
1532 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1533 
1534 	amdgpu_dm_hpd_fini(adev);
1535 
1536 	amdgpu_dm_irq_fini(adev);
1537 	amdgpu_dm_fini(adev);
1538 	return 0;
1539 }
1540 
1541 static int dm_suspend(void *handle)
1542 {
1543 	struct amdgpu_device *adev = handle;
1544 	struct amdgpu_display_manager *dm = &adev->dm;
1545 
1546 	WARN_ON(adev->dm.cached_state);
1547 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1548 
1549 	s3_handle_mst(adev->ddev, true);
1550 
1551 	amdgpu_dm_irq_suspend(adev);
1552 
1553 
1554 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1555 
1556 	return 0;
1557 }
1558 
1559 static struct amdgpu_dm_connector *
1560 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1561 					     struct drm_crtc *crtc)
1562 {
1563 	uint32_t i;
1564 	struct drm_connector_state *new_con_state;
1565 	struct drm_connector *connector;
1566 	struct drm_crtc *crtc_from_state;
1567 
1568 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1569 		crtc_from_state = new_con_state->crtc;
1570 
1571 		if (crtc_from_state == crtc)
1572 			return to_amdgpu_dm_connector(connector);
1573 	}
1574 
1575 	return NULL;
1576 }
1577 
/*
 * emulated_link_detect() - Fake sink detection for a forced connector.
 *
 * Used when userspace forces a connector on (base.force) but no physical
 * sink was detected: mark the link as disconnected, create a dc_sink that
 * matches the connector's signal type, and try to read a locally stored
 * (override) EDID for it.
 */
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	/* NOTE(review): prev_sink is retained here but never released or
	 * used again in this function — verify the refcounting intent. */
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	/* Map the connector's signal type onto sink capabilities. */
	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		/* NOTE(review): unlike the other cases, DP is emulated with
		 * a VIRTUAL signal type rather than its own — confirm this
		 * is intentional. */
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}
1659 
/*
 * dm_resume() - IP-block resume hook.
 *
 * Re-initializes DMUB, powers DC back to D0, re-runs sink detection on
 * every connector, then replays the atomic state cached by dm_suspend().
 * The order here matters: HPD Rx IRQs must be enabled before modesetting
 * so MST short-pulse interrupts work, and the dc states duplicated at
 * suspend must be released before atomic_check recreates them.
 */
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i, r;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST*/
	s3_handle_mst(ddev, false);

	/* Do detection*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced-on connectors with nothing attached get a fake sink. */
		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		/* Drop the stale sink; the detect above found the current one. */
		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
1773 
1774 /**
1775  * DOC: DM Lifecycle
1776  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1778  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1779  * the base driver's device list to be initialized and torn down accordingly.
1780  *
1781  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1782  */
1783 
/* IP-block hooks wiring the DM into the amdgpu base driver's lifecycle. */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
1801 
/* Registered with the base driver as the DCE (display) IP block, v1.0. */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
1810 
1811 
1812 /**
1813  * DOC: atomic
1814  *
1815  * *WIP*
1816  */
1817 
/* DRM mode-config callbacks: DM supplies the atomic check/commit pair. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};
1824 
/* Custom commit_tail so DM controls the hardware programming sequence. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
1828 
/*
 * update_connector_ext_caps() - Refresh AUX backlight capabilities.
 *
 * Reads the sink's DPCD extended caps and HDR metadata (max/min content
 * light level) and derives the AUX backlight input-signal range stored in
 * dm->backlight_caps.
 */
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	/* round(50 * 2**(r/32)) for r in 0..31 — see derivation below. */
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	conn_base = &aconnector->base;
	adev = conn_base->dev->dev_private;
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	/* Any of these DPCD bits means the panel can take backlight over AUX. */
	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need float point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From the Euclids division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * We just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	/* NOTE(review): DIV_ROUND_CLOSEST(min_cll, 255) collapses q to 0 or
	 * 1, and DIV_ROUND_CLOSEST(q*q, 100) is then always 0, so min ends
	 * up 0 — confirm this matches the intended CTA-861-G formula. */
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
1883 
/*
 * amdgpu_dm_update_connector_after_detect() - Sync DRM connector state with
 * the sink found by the last link detection.
 *
 * Transfers the link's local_sink into aconnector->dc_sink (with careful
 * refcount handling), updates the EDID property, freesync caps, CEC state
 * and (with HDCP) the content-protection state. Must be called after
 * dc_link_detect()/emulated_link_detect(); MST sinks are handled by the
 * DRM MST framework and are skipped here.
 */
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	/* Hold a local reference on the currently attached sink while we
	 * shuffle connector state below. */
	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			/* Sink has no EDID: clear ours and drop CEC tracking. */
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		/* Disconnect: clear every per-sink piece of connector state. */
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}
2024 
/*
 * Long-pulse HPD handler for a single connector.
 *
 * @param: the struct amdgpu_dm_connector whose link fired the HPD
 * interrupt (registered in register_hpd_handlers()).
 *
 * Re-runs link detection, refreshes the connector state and, unless
 * userspace has forced the connector state, sends a hotplug uevent.
 * Takes a mutex, so this must run in a sleepable context.
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = dev->dev_private;
#endif

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* A (dis)connect invalidates any HDCP session on this link. */
	if (adev->dm.hdcp_workqueue)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	/*
	 * A forced connector with nothing physically attached gets an
	 * emulated link so a stream can still be driven (e.g. headless S3
	 * resume); otherwise do a real detect.
	 */
	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		/* Don't notify userspace if it forced this state itself. */
		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
2076 
/*
 * Service DP short-pulse (IRQ_HPD) events for an MST-capable link.
 *
 * Reads the sink's event status indicator (ESI) registers over AUX,
 * hands them to the DRM MST manager, and acknowledges handled IRQs by
 * writing the ESI bytes back, looping until no new IRQ is pending or
 * a retry budget (max_process_count) is exhausted.
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	/* Bound the ack/re-read loop in case the sink keeps raising IRQs. */
	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	/* Pre-1.2 sinks expose the IRQ vector at the legacy DPCD address. */
	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	/* Loop while full reads succeed and the MST manager keeps
	 * reporting newly-handled IRQs.
	 */
	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			/* skip esi[0] (sink count) — ack starts one byte in */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			/* AUX writes can fail transiently; retry a few times. */
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
2154 
/*
 * Short-pulse (HPD_RX) handler for a single connector.
 *
 * @param: the struct amdgpu_dm_connector whose link fired the hpd_rx
 * interrupt (registered in register_hpd_handlers()).
 *
 * Lets DC service the RX IRQ (link loss, CP_IRQ, ...); if the
 * downstream port status changed on a non-MST-root connector, re-runs
 * detection and notifies userspace. MST short pulses are forwarded to
 * dm_handle_hpd_rx_irq().
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = dev->dev_private;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	/* MST branch links skip the lock; unlock below is guarded the same
	 * way, relying on dc_link->type not changing within this call.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);


#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced-but-disconnected connectors use an emulated link. */
		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Content-protection IRQ from the sink: hand off to HDCP work. */
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
	}
#endif
	/* Trained link or MST branch: service DP short-pulse ESI. */
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}
2233 
2234 static void register_hpd_handlers(struct amdgpu_device *adev)
2235 {
2236 	struct drm_device *dev = adev->ddev;
2237 	struct drm_connector *connector;
2238 	struct amdgpu_dm_connector *aconnector;
2239 	const struct dc_link *dc_link;
2240 	struct dc_interrupt_params int_params = {0};
2241 
2242 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2243 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2244 
2245 	list_for_each_entry(connector,
2246 			&dev->mode_config.connector_list, head)	{
2247 
2248 		aconnector = to_amdgpu_dm_connector(connector);
2249 		dc_link = aconnector->dc_link;
2250 
2251 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2252 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2253 			int_params.irq_source = dc_link->irq_source_hpd;
2254 
2255 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2256 					handle_hpd_irq,
2257 					(void *) aconnector);
2258 		}
2259 
2260 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2261 
2262 			/* Also register for DP short pulse (hpd_rx). */
2263 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2264 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2265 
2266 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2267 					handle_hpd_rx_irq,
2268 					(void *) aconnector);
2269 		}
2270 	}
2271 }
2272 
2273 /* Register IRQ sources and initialize IRQ callbacks */
2274 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2275 {
2276 	struct dc *dc = adev->dm.dc;
2277 	struct common_irq_params *c_irq_params;
2278 	struct dc_interrupt_params int_params = {0};
2279 	int r;
2280 	int i;
2281 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2282 
2283 	if (adev->asic_type >= CHIP_VEGA10)
2284 		client_id = SOC15_IH_CLIENTID_DCE;
2285 
2286 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2287 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2288 
2289 	/*
2290 	 * Actions of amdgpu_irq_add_id():
2291 	 * 1. Register a set() function with base driver.
2292 	 *    Base driver will call set() function to enable/disable an
2293 	 *    interrupt in DC hardware.
2294 	 * 2. Register amdgpu_dm_irq_handler().
2295 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2296 	 *    coming from DC hardware.
2297 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2298 	 *    for acknowledging and handling. */
2299 
2300 	/* Use VBLANK interrupt */
2301 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2302 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2303 		if (r) {
2304 			DRM_ERROR("Failed to add crtc irq id!\n");
2305 			return r;
2306 		}
2307 
2308 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2309 		int_params.irq_source =
2310 			dc_interrupt_to_irq_source(dc, i, 0);
2311 
2312 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2313 
2314 		c_irq_params->adev = adev;
2315 		c_irq_params->irq_src = int_params.irq_source;
2316 
2317 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2318 				dm_crtc_high_irq, c_irq_params);
2319 	}
2320 
2321 	/* Use VUPDATE interrupt */
2322 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2323 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2324 		if (r) {
2325 			DRM_ERROR("Failed to add vupdate irq id!\n");
2326 			return r;
2327 		}
2328 
2329 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2330 		int_params.irq_source =
2331 			dc_interrupt_to_irq_source(dc, i, 0);
2332 
2333 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2334 
2335 		c_irq_params->adev = adev;
2336 		c_irq_params->irq_src = int_params.irq_source;
2337 
2338 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2339 				dm_vupdate_high_irq, c_irq_params);
2340 	}
2341 
2342 	/* Use GRPH_PFLIP interrupt */
2343 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2344 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2345 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2346 		if (r) {
2347 			DRM_ERROR("Failed to add page flip irq id!\n");
2348 			return r;
2349 		}
2350 
2351 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2352 		int_params.irq_source =
2353 			dc_interrupt_to_irq_source(dc, i, 0);
2354 
2355 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2356 
2357 		c_irq_params->adev = adev;
2358 		c_irq_params->irq_src = int_params.irq_source;
2359 
2360 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2361 				dm_pflip_high_irq, c_irq_params);
2362 
2363 	}
2364 
2365 	/* HPD */
2366 	r = amdgpu_irq_add_id(adev, client_id,
2367 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2368 	if (r) {
2369 		DRM_ERROR("Failed to add hpd irq id!\n");
2370 		return r;
2371 	}
2372 
2373 	register_hpd_handlers(adev);
2374 
2375 	return 0;
2376 }
2377 
2378 #if defined(CONFIG_DRM_AMD_DC_DCN)
2379 /* Register IRQ sources and initialize IRQ callbacks */
2380 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2381 {
2382 	struct dc *dc = adev->dm.dc;
2383 	struct common_irq_params *c_irq_params;
2384 	struct dc_interrupt_params int_params = {0};
2385 	int r;
2386 	int i;
2387 
2388 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2389 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2390 
2391 	/*
2392 	 * Actions of amdgpu_irq_add_id():
2393 	 * 1. Register a set() function with base driver.
2394 	 *    Base driver will call set() function to enable/disable an
2395 	 *    interrupt in DC hardware.
2396 	 * 2. Register amdgpu_dm_irq_handler().
2397 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2398 	 *    coming from DC hardware.
2399 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2400 	 *    for acknowledging and handling.
2401 	 */
2402 
2403 	/* Use VSTARTUP interrupt */
2404 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2405 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2406 			i++) {
2407 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2408 
2409 		if (r) {
2410 			DRM_ERROR("Failed to add crtc irq id!\n");
2411 			return r;
2412 		}
2413 
2414 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2415 		int_params.irq_source =
2416 			dc_interrupt_to_irq_source(dc, i, 0);
2417 
2418 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2419 
2420 		c_irq_params->adev = adev;
2421 		c_irq_params->irq_src = int_params.irq_source;
2422 
2423 		amdgpu_dm_irq_register_interrupt(
2424 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2425 	}
2426 
2427 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2428 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2429 	 * to trigger at end of each vblank, regardless of state of the lock,
2430 	 * matching DCE behaviour.
2431 	 */
2432 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2433 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2434 	     i++) {
2435 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2436 
2437 		if (r) {
2438 			DRM_ERROR("Failed to add vupdate irq id!\n");
2439 			return r;
2440 		}
2441 
2442 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2443 		int_params.irq_source =
2444 			dc_interrupt_to_irq_source(dc, i, 0);
2445 
2446 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2447 
2448 		c_irq_params->adev = adev;
2449 		c_irq_params->irq_src = int_params.irq_source;
2450 
2451 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2452 				dm_vupdate_high_irq, c_irq_params);
2453 	}
2454 
2455 	/* Use GRPH_PFLIP interrupt */
2456 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2457 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2458 			i++) {
2459 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2460 		if (r) {
2461 			DRM_ERROR("Failed to add page flip irq id!\n");
2462 			return r;
2463 		}
2464 
2465 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2466 		int_params.irq_source =
2467 			dc_interrupt_to_irq_source(dc, i, 0);
2468 
2469 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2470 
2471 		c_irq_params->adev = adev;
2472 		c_irq_params->irq_src = int_params.irq_source;
2473 
2474 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2475 				dm_pflip_high_irq, c_irq_params);
2476 
2477 	}
2478 
2479 	/* HPD */
2480 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2481 			&adev->hpd_irq);
2482 	if (r) {
2483 		DRM_ERROR("Failed to add hpd irq id!\n");
2484 		return r;
2485 	}
2486 
2487 	register_hpd_handlers(adev);
2488 
2489 	return 0;
2490 }
2491 #endif
2492 
2493 /*
2494  * Acquires the lock for the atomic state object and returns
2495  * the new atomic state.
2496  *
2497  * This should only be called during atomic check.
2498  */
2499 static int dm_atomic_get_state(struct drm_atomic_state *state,
2500 			       struct dm_atomic_state **dm_state)
2501 {
2502 	struct drm_device *dev = state->dev;
2503 	struct amdgpu_device *adev = dev->dev_private;
2504 	struct amdgpu_display_manager *dm = &adev->dm;
2505 	struct drm_private_state *priv_state;
2506 
2507 	if (*dm_state)
2508 		return 0;
2509 
2510 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2511 	if (IS_ERR(priv_state))
2512 		return PTR_ERR(priv_state);
2513 
2514 	*dm_state = to_dm_atomic_state(priv_state);
2515 
2516 	return 0;
2517 }
2518 
2519 struct dm_atomic_state *
2520 dm_atomic_get_new_state(struct drm_atomic_state *state)
2521 {
2522 	struct drm_device *dev = state->dev;
2523 	struct amdgpu_device *adev = dev->dev_private;
2524 	struct amdgpu_display_manager *dm = &adev->dm;
2525 	struct drm_private_obj *obj;
2526 	struct drm_private_state *new_obj_state;
2527 	int i;
2528 
2529 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2530 		if (obj->funcs == dm->atomic_obj.funcs)
2531 			return to_dm_atomic_state(new_obj_state);
2532 	}
2533 
2534 	return NULL;
2535 }
2536 
2537 struct dm_atomic_state *
2538 dm_atomic_get_old_state(struct drm_atomic_state *state)
2539 {
2540 	struct drm_device *dev = state->dev;
2541 	struct amdgpu_device *adev = dev->dev_private;
2542 	struct amdgpu_display_manager *dm = &adev->dm;
2543 	struct drm_private_obj *obj;
2544 	struct drm_private_state *old_obj_state;
2545 	int i;
2546 
2547 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2548 		if (obj->funcs == dm->atomic_obj.funcs)
2549 			return to_dm_atomic_state(old_obj_state);
2550 	}
2551 
2552 	return NULL;
2553 }
2554 
2555 static struct drm_private_state *
2556 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2557 {
2558 	struct dm_atomic_state *old_state, *new_state;
2559 
2560 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2561 	if (!new_state)
2562 		return NULL;
2563 
2564 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2565 
2566 	old_state = to_dm_atomic_state(obj->state);
2567 
2568 	if (old_state && old_state->context)
2569 		new_state->context = dc_copy_state(old_state->context);
2570 
2571 	if (!new_state->context) {
2572 		kfree(new_state);
2573 		return NULL;
2574 	}
2575 
2576 	return &new_state->base;
2577 }
2578 
2579 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2580 				    struct drm_private_state *state)
2581 {
2582 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2583 
2584 	if (dm_state && dm_state->context)
2585 		dc_release_state(dm_state->context);
2586 
2587 	kfree(dm_state);
2588 }
2589 
/* Hooks the DRM atomic core uses to duplicate/destroy our private DC state. */
static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};
2594 
2595 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2596 {
2597 	struct dm_atomic_state *state;
2598 	int r;
2599 
2600 	adev->mode_info.mode_config_initialized = true;
2601 
2602 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2603 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2604 
2605 	adev->ddev->mode_config.max_width = 16384;
2606 	adev->ddev->mode_config.max_height = 16384;
2607 
2608 	adev->ddev->mode_config.preferred_depth = 24;
2609 	adev->ddev->mode_config.prefer_shadow = 1;
2610 	/* indicates support for immediate flip */
2611 	adev->ddev->mode_config.async_page_flip = true;
2612 
2613 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2614 
2615 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2616 	if (!state)
2617 		return -ENOMEM;
2618 
2619 	state->context = dc_create_state(adev->dm.dc);
2620 	if (!state->context) {
2621 		kfree(state);
2622 		return -ENOMEM;
2623 	}
2624 
2625 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2626 
2627 	drm_atomic_private_obj_init(adev->ddev,
2628 				    &adev->dm.atomic_obj,
2629 				    &state->base,
2630 				    &dm_atomic_state_funcs);
2631 
2632 	r = amdgpu_display_modeset_create_props(adev);
2633 	if (r)
2634 		return r;
2635 
2636 	r = amdgpu_dm_audio_init(adev);
2637 	if (r)
2638 		return r;
2639 
2640 	return 0;
2641 }
2642 
2643 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2644 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2645 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2646 
2647 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2648 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2649 
2650 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2651 {
2652 #if defined(CONFIG_ACPI)
2653 	struct amdgpu_dm_backlight_caps caps;
2654 
2655 	if (dm->backlight_caps.caps_valid)
2656 		return;
2657 
2658 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2659 	if (caps.caps_valid) {
2660 		dm->backlight_caps.caps_valid = true;
2661 		if (caps.aux_support)
2662 			return;
2663 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2664 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2665 	} else {
2666 		dm->backlight_caps.min_input_signal =
2667 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2668 		dm->backlight_caps.max_input_signal =
2669 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2670 	}
2671 #else
2672 	if (dm->backlight_caps.aux_support)
2673 		return;
2674 
2675 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2676 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2677 #endif
2678 }
2679 
2680 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2681 {
2682 	bool rc;
2683 
2684 	if (!link)
2685 		return 1;
2686 
2687 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2688 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2689 
2690 	return rc ? 0 : 1;
2691 }
2692 
2693 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2694 			      const uint32_t user_brightness)
2695 {
2696 	u32 min, max, conversion_pace;
2697 	u32 brightness = user_brightness;
2698 
2699 	if (!caps)
2700 		goto out;
2701 
2702 	if (!caps->aux_support) {
2703 		max = caps->max_input_signal;
2704 		min = caps->min_input_signal;
2705 		/*
2706 		 * The brightness input is in the range 0-255
2707 		 * It needs to be rescaled to be between the
2708 		 * requested min and max input signal
2709 		 * It also needs to be scaled up by 0x101 to
2710 		 * match the DC interface which has a range of
2711 		 * 0 to 0xffff
2712 		 */
2713 		conversion_pace = 0x101;
2714 		brightness =
2715 			user_brightness
2716 			* conversion_pace
2717 			* (max - min)
2718 			/ AMDGPU_MAX_BL_LEVEL
2719 			+ min * conversion_pace;
2720 	} else {
2721 		/* TODO
2722 		 * We are doing a linear interpolation here, which is OK but
2723 		 * does not provide the optimal result. We probably want
2724 		 * something close to the Perceptual Quantizer (PQ) curve.
2725 		 */
2726 		max = caps->aux_max_input_signal;
2727 		min = caps->aux_min_input_signal;
2728 
2729 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2730 			       + user_brightness * max;
2731 		// Multiple the value by 1000 since we use millinits
2732 		brightness *= 1000;
2733 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2734 	}
2735 
2736 out:
2737 	return brightness;
2738 }
2739 
2740 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2741 {
2742 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2743 	struct amdgpu_dm_backlight_caps caps;
2744 	struct dc_link *link = NULL;
2745 	u32 brightness;
2746 	bool rc;
2747 
2748 	amdgpu_dm_update_backlight_caps(dm);
2749 	caps = dm->backlight_caps;
2750 
2751 	link = (struct dc_link *)dm->backlight_link;
2752 
2753 	brightness = convert_brightness(&caps, bd->props.brightness);
2754 	// Change brightness based on AUX property
2755 	if (caps.aux_support)
2756 		return set_backlight_via_aux(link, brightness);
2757 
2758 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2759 
2760 	return rc ? 0 : 1;
2761 }
2762 
2763 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2764 {
2765 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2766 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2767 
2768 	if (ret == DC_ERROR_UNEXPECTED)
2769 		return bd->props.brightness;
2770 	return ret;
2771 }
2772 
/* Backlight class hooks; brightness range is 0..AMDGPU_MAX_BL_LEVEL. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};
2778 
2779 static void
2780 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2781 {
2782 	char bl_name[16];
2783 	struct backlight_properties props = { 0 };
2784 
2785 	amdgpu_dm_update_backlight_caps(dm);
2786 
2787 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2788 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2789 	props.type = BACKLIGHT_RAW;
2790 
2791 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2792 			dm->adev->ddev->primary->index);
2793 
2794 	dm->backlight_dev = backlight_device_register(bl_name,
2795 			dm->adev->ddev->dev,
2796 			dm,
2797 			&amdgpu_dm_backlight_ops,
2798 			&props);
2799 
2800 	if (IS_ERR(dm->backlight_dev))
2801 		DRM_ERROR("DM: Backlight registration failed!\n");
2802 	else
2803 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2804 }
2805 
2806 #endif
2807 
2808 static int initialize_plane(struct amdgpu_display_manager *dm,
2809 			    struct amdgpu_mode_info *mode_info, int plane_id,
2810 			    enum drm_plane_type plane_type,
2811 			    const struct dc_plane_cap *plane_cap)
2812 {
2813 	struct drm_plane *plane;
2814 	unsigned long possible_crtcs;
2815 	int ret = 0;
2816 
2817 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2818 	if (!plane) {
2819 		DRM_ERROR("KMS: Failed to allocate plane\n");
2820 		return -ENOMEM;
2821 	}
2822 	plane->type = plane_type;
2823 
2824 	/*
2825 	 * HACK: IGT tests expect that the primary plane for a CRTC
2826 	 * can only have one possible CRTC. Only expose support for
2827 	 * any CRTC if they're not going to be used as a primary plane
2828 	 * for a CRTC - like overlay or underlay planes.
2829 	 */
2830 	possible_crtcs = 1 << plane_id;
2831 	if (plane_id >= dm->dc->caps.max_streams)
2832 		possible_crtcs = 0xff;
2833 
2834 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2835 
2836 	if (ret) {
2837 		DRM_ERROR("KMS: Failed to initialize plane\n");
2838 		kfree(plane);
2839 		return ret;
2840 	}
2841 
2842 	if (mode_info)
2843 		mode_info->planes[plane_id] = plane;
2844 
2845 	return ret;
2846 }
2847 
2848 
/*
 * Register a backlight device for @link if it is an internal panel
 * (eDP/LVDS) with something connected; remember the link so the
 * backlight ops know which panel to drive.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}
2869 
2870 
2871 /*
2872  * In this architecture, the association
2873  * connector -> encoder -> crtc
2874  * id not really requried. The crtc and connector will hold the
2875  * display_index as an abstraction to use with DAL component
2876  *
2877  * Returns 0 on success
2878  */
2879 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2880 {
2881 	struct amdgpu_display_manager *dm = &adev->dm;
2882 	int32_t i;
2883 	struct amdgpu_dm_connector *aconnector = NULL;
2884 	struct amdgpu_encoder *aencoder = NULL;
2885 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
2886 	uint32_t link_cnt;
2887 	int32_t primary_planes;
2888 	enum dc_connection_type new_connection_type = dc_connection_none;
2889 	const struct dc_plane_cap *plane;
2890 
2891 	link_cnt = dm->dc->caps.max_links;
2892 	if (amdgpu_dm_mode_config_init(dm->adev)) {
2893 		DRM_ERROR("DM: Failed to initialize mode config\n");
2894 		return -EINVAL;
2895 	}
2896 
2897 	/* There is one primary plane per CRTC */
2898 	primary_planes = dm->dc->caps.max_streams;
2899 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2900 
2901 	/*
2902 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
2903 	 * Order is reversed to match iteration order in atomic check.
2904 	 */
2905 	for (i = (primary_planes - 1); i >= 0; i--) {
2906 		plane = &dm->dc->caps.planes[i];
2907 
2908 		if (initialize_plane(dm, mode_info, i,
2909 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
2910 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
2911 			goto fail;
2912 		}
2913 	}
2914 
2915 	/*
2916 	 * Initialize overlay planes, index starting after primary planes.
2917 	 * These planes have a higher DRM index than the primary planes since
2918 	 * they should be considered as having a higher z-order.
2919 	 * Order is reversed to match iteration order in atomic check.
2920 	 *
2921 	 * Only support DCN for now, and only expose one so we don't encourage
2922 	 * userspace to use up all the pipes.
2923 	 */
2924 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2925 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2926 
2927 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2928 			continue;
2929 
2930 		if (!plane->blends_with_above || !plane->blends_with_below)
2931 			continue;
2932 
2933 		if (!plane->pixel_format_support.argb8888)
2934 			continue;
2935 
2936 		if (initialize_plane(dm, NULL, primary_planes + i,
2937 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
2938 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2939 			goto fail;
2940 		}
2941 
2942 		/* Only create one overlay plane. */
2943 		break;
2944 	}
2945 
2946 	for (i = 0; i < dm->dc->caps.max_streams; i++)
2947 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2948 			DRM_ERROR("KMS: Failed to initialize crtc\n");
2949 			goto fail;
2950 		}
2951 
2952 	dm->display_indexes_num = dm->dc->caps.max_streams;
2953 
2954 	/* loops over all connectors on the board */
2955 	for (i = 0; i < link_cnt; i++) {
2956 		struct dc_link *link = NULL;
2957 
2958 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2959 			DRM_ERROR(
2960 				"KMS: Cannot support more than %d display indexes\n",
2961 					AMDGPU_DM_MAX_DISPLAY_INDEX);
2962 			continue;
2963 		}
2964 
2965 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2966 		if (!aconnector)
2967 			goto fail;
2968 
2969 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2970 		if (!aencoder)
2971 			goto fail;
2972 
2973 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2974 			DRM_ERROR("KMS: Failed to initialize encoder\n");
2975 			goto fail;
2976 		}
2977 
2978 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2979 			DRM_ERROR("KMS: Failed to initialize connector\n");
2980 			goto fail;
2981 		}
2982 
2983 		link = dc_get_link_at_index(dm->dc, i);
2984 
2985 		if (!dc_link_detect_sink(link, &new_connection_type))
2986 			DRM_ERROR("KMS: Failed to detect connector\n");
2987 
2988 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2989 			emulated_link_detect(link);
2990 			amdgpu_dm_update_connector_after_detect(aconnector);
2991 
2992 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2993 			amdgpu_dm_update_connector_after_detect(aconnector);
2994 			register_backlight_device(dm, link);
2995 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2996 				amdgpu_dm_set_psr_caps(link);
2997 		}
2998 
2999 
3000 	}
3001 
3002 	/* Software is initialized. Now we can register interrupt handlers. */
3003 	switch (adev->asic_type) {
3004 	case CHIP_BONAIRE:
3005 	case CHIP_HAWAII:
3006 	case CHIP_KAVERI:
3007 	case CHIP_KABINI:
3008 	case CHIP_MULLINS:
3009 	case CHIP_TONGA:
3010 	case CHIP_FIJI:
3011 	case CHIP_CARRIZO:
3012 	case CHIP_STONEY:
3013 	case CHIP_POLARIS11:
3014 	case CHIP_POLARIS10:
3015 	case CHIP_POLARIS12:
3016 	case CHIP_VEGAM:
3017 	case CHIP_VEGA10:
3018 	case CHIP_VEGA12:
3019 	case CHIP_VEGA20:
3020 		if (dce110_register_irq_handlers(dm->adev)) {
3021 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3022 			goto fail;
3023 		}
3024 		break;
3025 #if defined(CONFIG_DRM_AMD_DC_DCN)
3026 	case CHIP_RAVEN:
3027 	case CHIP_NAVI12:
3028 	case CHIP_NAVI10:
3029 	case CHIP_NAVI14:
3030 	case CHIP_RENOIR:
3031 		if (dcn10_register_irq_handlers(dm->adev)) {
3032 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3033 			goto fail;
3034 		}
3035 		break;
3036 #endif
3037 	default:
3038 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3039 		goto fail;
3040 	}
3041 
3042 	/* No userspace support. */
3043 	dm->dc->debug.disable_tri_buf = true;
3044 
3045 	return 0;
3046 fail:
3047 	kfree(aencoder);
3048 	kfree(aconnector);
3049 
3050 	return -EINVAL;
3051 }
3052 
3053 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3054 {
3055 	drm_mode_config_cleanup(dm->ddev);
3056 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3057 	return;
3058 }
3059 
3060 /******************************************************************************
3061  * amdgpu_display_funcs functions
3062  *****************************************************************************/
3063 
3064 /*
3065  * dm_bandwidth_update - program display watermarks
3066  *
3067  * @adev: amdgpu_device pointer
3068  *
3069  * Calculate and program the display watermarks and line buffer allocation.
3070  */
3071 static void dm_bandwidth_update(struct amdgpu_device *adev)
3072 {
3073 	/* TODO: implement later */
3074 }
3075 
/*
 * Display-function table handed to amdgpu's generic display code.
 * Most hooks are NULL because DC performs the corresponding work itself
 * (VBIOS parsing, backlight control, HPD handling).
 */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
3089 
3090 #if defined(CONFIG_DEBUG_KERNEL_DC)
3091 
3092 static ssize_t s3_debug_store(struct device *device,
3093 			      struct device_attribute *attr,
3094 			      const char *buf,
3095 			      size_t count)
3096 {
3097 	int ret;
3098 	int s3_state;
3099 	struct drm_device *drm_dev = dev_get_drvdata(device);
3100 	struct amdgpu_device *adev = drm_dev->dev_private;
3101 
3102 	ret = kstrtoint(buf, 0, &s3_state);
3103 
3104 	if (ret == 0) {
3105 		if (s3_state) {
3106 			dm_resume(adev);
3107 			drm_kms_helper_hotplug_event(adev->ddev);
3108 		} else
3109 			dm_suspend(adev);
3110 	}
3111 
3112 	return ret == 0 ? count : 0;
3113 }
3114 
3115 DEVICE_ATTR_WO(s3_debug);
3116 
3117 #endif
3118 
/**
 * dm_early_init - set per-ASIC display resource counts
 * @handle: opaque IP-block handle, actually an amdgpu_device pointer
 *
 * Fills adev->mode_info with the number of CRTCs, HPD lines and DIG
 * encoders for the detected ASIC, installs the DM IRQ helpers and, if no
 * display function table is set yet, the DC-backed default table.
 *
 * Returns 0 on success, -EINVAL for an unsupported ASIC.
 */
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
	/*
	 * NOTE(review): the NAVI/RENOIR cases below sit outside the
	 * CONFIG_DRM_AMD_DC_DCN guard while CHIP_RAVEN is inside it —
	 * confirm this asymmetry is intentional (their IRQ registration
	 * path does require the DCN config).
	 */
	case CHIP_NAVI10:
	case CHIP_NAVI12:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_RENOIR:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
3222 
3223 static bool modeset_required(struct drm_crtc_state *crtc_state,
3224 			     struct dc_stream_state *new_stream,
3225 			     struct dc_stream_state *old_stream)
3226 {
3227 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3228 		return false;
3229 
3230 	if (!crtc_state->enable)
3231 		return false;
3232 
3233 	return crtc_state->active;
3234 }
3235 
3236 static bool modereset_required(struct drm_crtc_state *crtc_state)
3237 {
3238 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3239 		return false;
3240 
3241 	return !crtc_state->enable || !crtc_state->active;
3242 }
3243 
/* drm_encoder_funcs.destroy: release DRM core state, then free the encoder. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
3249 
/* Encoder vtable: only destruction is needed; setup is driven by DC. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
3253 
3254 
/*
 * Translate a DRM plane state into DC scaling info: the source rect
 * (framebuffer region to scan out), the destination rect (placement on
 * the CRTC) and the clip rect.
 *
 * Returns 0 on success, -EINVAL for a zero-sized rect or a scaling ratio
 * outside the accepted range (see the permille checks below).
 */
static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/*
	 * dst/src ratio in permille must stay within [250, 16000], i.e.
	 * at most 4x downscale and 16x upscale.
	 * TODO: Validate scaling per-format with DC plane caps
	 */
	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < 250 || scale_w > 16000)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < 250 || scale_h > 16000)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
3310 
3311 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3312 		       uint64_t *tiling_flags, bool *tmz_surface)
3313 {
3314 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3315 	int r = amdgpu_bo_reserve(rbo, false);
3316 
3317 	if (unlikely(r)) {
3318 		/* Don't show error message when returning -ERESTARTSYS */
3319 		if (r != -ERESTARTSYS)
3320 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3321 		return r;
3322 	}
3323 
3324 	if (tiling_flags)
3325 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3326 
3327 	if (tmz_surface)
3328 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3329 
3330 	amdgpu_bo_unreserve(rbo);
3331 
3332 	return r;
3333 }
3334 
3335 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3336 {
3337 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3338 
3339 	return offset ? (address + offset * 256) : 0;
3340 }
3341 
/*
 * Query DC for the surface's DCC (Delta Color Compression) capability and,
 * if the BO actually carries DCC metadata, fill @dcc and the metadata
 * address into @address.  @info is the raw BO tiling-flags word.
 *
 * Returns 0 both on success and when DCC is simply left disabled (forced
 * off, no metadata present, or a video format); returns -EINVAL when the
 * BO claims DCC in a configuration DC cannot support.
 */
static int
fill_plane_dcc_attributes(struct amdgpu_device *adev,
			  const struct amdgpu_framebuffer *afb,
			  const enum surface_pixel_format format,
			  const enum dc_rotation_angle rotation,
			  const struct plane_size *plane_size,
			  const union dc_tiling_info *tiling_info,
			  const uint64_t info,
			  struct dc_plane_dcc_param *dcc,
			  struct dc_plane_address *address,
			  bool force_disable_dcc)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;
	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
	uint64_t dcc_address;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (force_disable_dcc)
		return 0;

	/* No DCC_OFFSET_256B means the BO has no DCC metadata at all. */
	if (!offset)
		return 0;

	/* DCC only applies to graphics (non-video) surface formats here. */
	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	if (!dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	/* Hardware requires independent 64B blocks but the BO lacks them. */
	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	dcc->enable = 1;
	dcc->meta_pitch =
		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
	dcc->independent_64b_blks = i64b;

	dcc_address = get_dcc_address(afb->address, info);
	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);

	return 0;
}
3406 
/*
 * Fill DC's buffer-level plane attributes from an amdgpu framebuffer:
 * surface/chroma sizes and pitches, scanout addresses, GFX8/GFX9 tiling
 * parameters and (on GFX9+) the DCC state.  All output structs are
 * zeroed first, so callers get a fully-defined result.
 *
 * Returns 0 on success or a negative errno from the DCC helper.
 */
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		/* Single-plane (graphics) surface. */
		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(afb->address);
		address->grph.addr.high_part = upper_32_bits(afb->address);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		/* Two-plane video surface: luma at plane 0, chroma at plane 1. */
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(afb->address);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(afb->address);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_NAVI10 ||
	    adev->asic_type == CHIP_NAVI14 ||
	    adev->asic_type == CHIP_NAVI12 ||
	    adev->asic_type == CHIP_RENOIR ||
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		tiling_info->gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		tiling_info->gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		tiling_info->gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		tiling_info->gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		tiling_info->gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		tiling_info->gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		tiling_info->gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		tiling_info->gfx9.shaderEnable = 1;

		/* DCC is only meaningful on the GFX9+ addressing path. */
		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
						plane_size, tiling_info,
						tiling_flags, dcc, address,
						force_disable_dcc);
		if (ret)
			return ret;
	}

	return 0;
}
3534 
3535 static void
3536 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3537 			       bool *per_pixel_alpha, bool *global_alpha,
3538 			       int *global_alpha_value)
3539 {
3540 	*per_pixel_alpha = false;
3541 	*global_alpha = false;
3542 	*global_alpha_value = 0xff;
3543 
3544 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3545 		return;
3546 
3547 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3548 		static const uint32_t alpha_formats[] = {
3549 			DRM_FORMAT_ARGB8888,
3550 			DRM_FORMAT_RGBA8888,
3551 			DRM_FORMAT_ABGR8888,
3552 		};
3553 		uint32_t format = plane_state->fb->format->format;
3554 		unsigned int i;
3555 
3556 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3557 			if (format == alpha_formats[i]) {
3558 				*per_pixel_alpha = true;
3559 				break;
3560 			}
3561 		}
3562 	}
3563 
3564 	if (plane_state->alpha < 0xffff) {
3565 		*global_alpha = true;
3566 		*global_alpha_value = plane_state->alpha >> 8;
3567 	}
3568 }
3569 
3570 static int
3571 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3572 			    const enum surface_pixel_format format,
3573 			    enum dc_color_space *color_space)
3574 {
3575 	bool full_range;
3576 
3577 	*color_space = COLOR_SPACE_SRGB;
3578 
3579 	/* DRM color properties only affect non-RGB formats. */
3580 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3581 		return 0;
3582 
3583 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3584 
3585 	switch (plane_state->color_encoding) {
3586 	case DRM_COLOR_YCBCR_BT601:
3587 		if (full_range)
3588 			*color_space = COLOR_SPACE_YCBCR601;
3589 		else
3590 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3591 		break;
3592 
3593 	case DRM_COLOR_YCBCR_BT709:
3594 		if (full_range)
3595 			*color_space = COLOR_SPACE_YCBCR709;
3596 		else
3597 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3598 		break;
3599 
3600 	case DRM_COLOR_YCBCR_BT2020:
3601 		if (full_range)
3602 			*color_space = COLOR_SPACE_2020_YCBCR;
3603 		else
3604 			return -EINVAL;
3605 		break;
3606 
3607 	default:
3608 		return -EINVAL;
3609 	}
3610 
3611 	return 0;
3612 }
3613 
/*
 * Translate a DRM plane state into a DC plane_info plus scanout address:
 * maps the DRM fourcc to a DC surface format, the DRM rotation property
 * to a DC rotation angle, then delegates color-space, buffer/tiling and
 * blending attributes to the respective fill helpers.
 *
 * Returns 0 on success, -EINVAL for an unsupported framebuffer format,
 * or a negative errno from the helpers.
 */
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}
3722 
3723 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3724 				    struct dc_plane_state *dc_plane_state,
3725 				    struct drm_plane_state *plane_state,
3726 				    struct drm_crtc_state *crtc_state)
3727 {
3728 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3729 	const struct amdgpu_framebuffer *amdgpu_fb =
3730 		to_amdgpu_framebuffer(plane_state->fb);
3731 	struct dc_scaling_info scaling_info;
3732 	struct dc_plane_info plane_info;
3733 	uint64_t tiling_flags;
3734 	int ret;
3735 	bool tmz_surface = false;
3736 	bool force_disable_dcc = false;
3737 
3738 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3739 	if (ret)
3740 		return ret;
3741 
3742 	dc_plane_state->src_rect = scaling_info.src_rect;
3743 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3744 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3745 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3746 
3747 	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3748 	if (ret)
3749 		return ret;
3750 
3751 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3752 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3753 					  &plane_info,
3754 					  &dc_plane_state->address,
3755 					  tmz_surface,
3756 					  force_disable_dcc);
3757 	if (ret)
3758 		return ret;
3759 
3760 	dc_plane_state->format = plane_info.format;
3761 	dc_plane_state->color_space = plane_info.color_space;
3762 	dc_plane_state->format = plane_info.format;
3763 	dc_plane_state->plane_size = plane_info.plane_size;
3764 	dc_plane_state->rotation = plane_info.rotation;
3765 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3766 	dc_plane_state->stereo_format = plane_info.stereo_format;
3767 	dc_plane_state->tiling_info = plane_info.tiling_info;
3768 	dc_plane_state->visible = plane_info.visible;
3769 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3770 	dc_plane_state->global_alpha = plane_info.global_alpha;
3771 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3772 	dc_plane_state->dcc = plane_info.dcc;
3773 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3774 
3775 	/*
3776 	 * Always set input transfer function, since plane state is refreshed
3777 	 * every time.
3778 	 */
3779 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3780 	if (ret)
3781 		return ret;
3782 
3783 	return 0;
3784 }
3785 
/*
 * Compute the stream's src (composition-space viewport) and dst (stream
 * addressable area) rects from the display mode and the connector's
 * scaling/underscan properties: full-screen stretch by default,
 * aspect-preserving letterbox for RMX_ASPECT/RMX_OFF, 1:1 centering for
 * RMX_CENTER, with optional underscan borders applied afterwards.
 */
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space*/
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			/* Shrink one dst dimension to preserve src aspect ratio. */
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		/* Center the (possibly shrunk) dst within the addressable area. */
		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
			dst.x, dst.y, dst.width, dst.height);

}
3840 
/*
 * Determine the DC color depth for a connector: start from the EDID bpc
 * (or the HDMI 2.0 HF-VSDB deep-color caps for YCbCr 4:2:0 modes), cap
 * it by the user's max_requested_bpc property, then map to a DC enum.
 */
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      const struct drm_connector_state *state,
				      bool is_y420)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (!state)
		state = connector->state;

	if (state) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min(bpc, state->max_requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary Work around, DRM doesn't parse color depth for
		 * EDID revision before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}
3906 
/* Convert the DRM picture aspect ratio enum to DC's equivalent. */
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}
3913 
3914 static enum dc_color_space
3915 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3916 {
3917 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
3918 
3919 	switch (dc_crtc_timing->pixel_encoding)	{
3920 	case PIXEL_ENCODING_YCBCR422:
3921 	case PIXEL_ENCODING_YCBCR444:
3922 	case PIXEL_ENCODING_YCBCR420:
3923 	{
3924 		/*
3925 		 * 27030khz is the separation point between HDTV and SDTV
3926 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
3927 		 * respectively
3928 		 */
3929 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
3930 			if (dc_crtc_timing->flags.Y_ONLY)
3931 				color_space =
3932 					COLOR_SPACE_YCBCR709_LIMITED;
3933 			else
3934 				color_space = COLOR_SPACE_YCBCR709;
3935 		} else {
3936 			if (dc_crtc_timing->flags.Y_ONLY)
3937 				color_space =
3938 					COLOR_SPACE_YCBCR601_LIMITED;
3939 			else
3940 				color_space = COLOR_SPACE_YCBCR601;
3941 		}
3942 
3943 	}
3944 	break;
3945 	case PIXEL_ENCODING_RGB:
3946 		color_space = COLOR_SPACE_SRGB;
3947 		break;
3948 
3949 	default:
3950 		WARN_ON(1);
3951 		break;
3952 	}
3953 
3954 	return color_space;
3955 }
3956 
/*
 * Lower the stream colour depth, if necessary, until the resulting pixel
 * clock fits within the sink's maximum TMDS clock.
 *
 * Returns true when a depth (possibly the original one) fits and has been
 * written to @timing_out->display_color_depth; false when the depth is not
 * valid for HDMI or even the lowest depth exceeds the TMDS limit.
 */
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;
	do {
		/*
		 * pix_clk_100hz is in units of 100Hz; dividing by 10 yields
		 * kHz, presumably matching info->max_tmds_clock's unit —
		 * TODO confirm against drm_display_info documentation.
		 */
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
		/*
		 * NOTE(review): "--depth" assumes the dc_color_depth
		 * enumerators are declared in ascending bpc order — confirm
		 * against the dc colour-depth enum definition.
		 */
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
3992 
/*
 * Translate @mode_in (plus connector state) into DC CRTC timing on
 * @stream->timing, then derive the stream's output colour space and
 * transfer function from the result.
 *
 * When @old_stream is non-NULL, the VIC and sync polarities are carried
 * over from the old timing instead of being re-derived; the caller uses
 * this when scaling changed but the refresh rate did not.
 */
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	/*
	 * Pixel encoding: YCbCr 4:2:0 when the HDMI mode mandates it or the
	 * connector requests it (force_yuv420_output); else YCbCr 4:4:4 if
	 * the HDMI sink advertises it; RGB otherwise.
	 */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	/*
	 * Depth is computed after pixel_encoding: the helper is told whether
	 * 4:2:0 was selected above.
	 */
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector, connector_state,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if(old_stream) {
		/* Preserve VIC and sync polarities from the previous stream. */
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	/* For HDMI, let the infoframe helpers decide VIC and HDMI-VIC. */
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	/* Copy the hardware (crtc_*) timing into DC's representation. */
	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	/*
	 * If the timing does not fit the sink's TMDS clock at this depth,
	 * fall back to YCbCr 4:2:0 (when the mode allows it) and retry.
	 */
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
4080 
4081 static void fill_audio_info(struct audio_info *audio_info,
4082 			    const struct drm_connector *drm_connector,
4083 			    const struct dc_sink *dc_sink)
4084 {
4085 	int i = 0;
4086 	int cea_revision = 0;
4087 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4088 
4089 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4090 	audio_info->product_id = edid_caps->product_id;
4091 
4092 	cea_revision = drm_connector->display_info.cea_rev;
4093 
4094 	strscpy(audio_info->display_name,
4095 		edid_caps->display_name,
4096 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4097 
4098 	if (cea_revision >= 3) {
4099 		audio_info->mode_count = edid_caps->audio_mode_count;
4100 
4101 		for (i = 0; i < audio_info->mode_count; ++i) {
4102 			audio_info->modes[i].format_code =
4103 					(enum audio_format_code)
4104 					(edid_caps->audio_modes[i].format_code);
4105 			audio_info->modes[i].channel_count =
4106 					edid_caps->audio_modes[i].channel_count;
4107 			audio_info->modes[i].sample_rates.all =
4108 					edid_caps->audio_modes[i].sample_rate;
4109 			audio_info->modes[i].sample_size =
4110 					edid_caps->audio_modes[i].sample_size;
4111 		}
4112 	}
4113 
4114 	audio_info->flags.all = edid_caps->speaker_flags;
4115 
4116 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4117 	if (drm_connector->latency_present[0]) {
4118 		audio_info->video_latency = drm_connector->video_latency[0];
4119 		audio_info->audio_latency = drm_connector->audio_latency[0];
4120 	}
4121 
4122 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4123 
4124 }
4125 
4126 static void
4127 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4128 				      struct drm_display_mode *dst_mode)
4129 {
4130 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4131 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4132 	dst_mode->crtc_clock = src_mode->crtc_clock;
4133 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4134 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4135 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4136 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4137 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4138 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4139 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4140 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4141 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4142 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4143 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4144 }
4145 
4146 static void
4147 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4148 					const struct drm_display_mode *native_mode,
4149 					bool scale_enabled)
4150 {
4151 	if (scale_enabled) {
4152 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4153 	} else if (native_mode->clock == drm_mode->clock &&
4154 			native_mode->htotal == drm_mode->htotal &&
4155 			native_mode->vtotal == drm_mode->vtotal) {
4156 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4157 	} else {
4158 		/* no scaling nor amdgpu inserted, no need to patch */
4159 	}
4160 }
4161 
4162 static struct dc_sink *
4163 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4164 {
4165 	struct dc_sink_init_data sink_init_data = { 0 };
4166 	struct dc_sink *sink = NULL;
4167 	sink_init_data.link = aconnector->dc_link;
4168 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4169 
4170 	sink = dc_sink_create(&sink_init_data);
4171 	if (!sink) {
4172 		DRM_ERROR("Failed to create sink!\n");
4173 		return NULL;
4174 	}
4175 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4176 
4177 	return sink;
4178 }
4179 
4180 static void set_multisync_trigger_params(
4181 		struct dc_stream_state *stream)
4182 {
4183 	if (stream->triggered_crtc_reset.enabled) {
4184 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4185 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4186 	}
4187 }
4188 
4189 static void set_master_stream(struct dc_stream_state *stream_set[],
4190 			      int stream_count)
4191 {
4192 	int j, highest_rfr = 0, master_stream = 0;
4193 
4194 	for (j = 0;  j < stream_count; j++) {
4195 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4196 			int refresh_rate = 0;
4197 
4198 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4199 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4200 			if (refresh_rate > highest_rfr) {
4201 				highest_rfr = refresh_rate;
4202 				master_stream = j;
4203 			}
4204 		}
4205 	}
4206 	for (j = 0;  j < stream_count; j++) {
4207 		if (stream_set[j])
4208 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4209 	}
4210 }
4211 
4212 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4213 {
4214 	int i = 0;
4215 
4216 	if (context->stream_count < 2)
4217 		return;
4218 	for (i = 0; i < context->stream_count ; i++) {
4219 		if (!context->streams[i])
4220 			continue;
4221 		/*
4222 		 * TODO: add a function to read AMD VSDB bits and set
4223 		 * crtc_sync_master.multi_sync_enabled flag
4224 		 * For now it's set to false
4225 		 */
4226 		set_multisync_trigger_params(context->streams[i]);
4227 	}
4228 	set_master_stream(context->streams, context->stream_count);
4229 }
4230 
/*
 * Build a dc_stream_state for @aconnector displaying @drm_mode.
 *
 * @dm_state may be NULL (e.g. when called from mode_valid), in which case
 * no scaling state is applied.  @old_stream, when non-NULL, provides the
 * previous timing whose VIC/polarities are reused if only scaling changed.
 *
 * Returns NULL on failure.  The caller owns the returned stream reference
 * and must release it with dc_stream_release().
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;

	struct dc_sink *sink = NULL;
	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	/* Sinkless connectors get a virtual fake sink so a stream can
	 * still be created. */
	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	/* Find the sink's preferred mode; fall back to the first listed. */
	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	* If scaling is enabled and refresh rate didn't change
	* we copy the vic and polarities of the old timings
	*/
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream);

	stream->timing.flags.DSC = 0;

	/* Enable DSC on DP links when the decoder supports it and a
	 * config can be computed for the link bandwidth. */
	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
	if (stream->link->psr_settings.psr_feature_enabled)	{
		struct dc  *core_dc = stream->link->ctx->dc;

		if (dc_is_dmcu_initialized(core_dc)) {
			//
			// should decide stream support vsc sdp colorimetry capability
			// before building vsc info packet
			//
			stream->use_vsc_sdp_for_colorimetry = false;
			/*
			 * NOTE(review): aconnector->dc_sink may be NULL here on
			 * the fake-sink path above; presumably PSR is never
			 * enabled for such links — confirm.
			 */
			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
				stream->use_vsc_sdp_for_colorimetry =
					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
			} else {
				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
					stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
					stream->use_vsc_sdp_for_colorimetry = true;
				}
			}
			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
		}
	}
finish:
	dc_sink_release(sink);

	return stream;
}
4386 
/* drm_crtc_funcs.destroy: tear down DRM core state and free the CRTC. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
4392 
4393 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4394 				  struct drm_crtc_state *state)
4395 {
4396 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4397 
4398 	/* TODO Destroy dc_stream objects are stream object is flattened */
4399 	if (cur->stream)
4400 		dc_stream_release(cur->stream);
4401 
4402 
4403 	__drm_atomic_helper_crtc_destroy_state(state);
4404 
4405 
4406 	kfree(state);
4407 }
4408 
4409 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4410 {
4411 	struct dm_crtc_state *state;
4412 
4413 	if (crtc->state)
4414 		dm_crtc_destroy_state(crtc, crtc->state);
4415 
4416 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4417 	if (WARN_ON(!state))
4418 		return;
4419 
4420 	crtc->state = &state->base;
4421 	crtc->state->crtc = crtc;
4422 
4423 }
4424 
4425 static struct drm_crtc_state *
4426 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4427 {
4428 	struct dm_crtc_state *state, *cur;
4429 
4430 	cur = to_dm_crtc_state(crtc->state);
4431 
4432 	if (WARN_ON(!crtc->state))
4433 		return NULL;
4434 
4435 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4436 	if (!state)
4437 		return NULL;
4438 
4439 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4440 
4441 	if (cur->stream) {
4442 		state->stream = cur->stream;
4443 		dc_stream_retain(state->stream);
4444 	}
4445 
4446 	state->active_planes = cur->active_planes;
4447 	state->interrupts_enabled = cur->interrupts_enabled;
4448 	state->vrr_params = cur->vrr_params;
4449 	state->vrr_infopacket = cur->vrr_infopacket;
4450 	state->abm_level = cur->abm_level;
4451 	state->vrr_supported = cur->vrr_supported;
4452 	state->freesync_config = cur->freesync_config;
4453 	state->crc_src = cur->crc_src;
4454 	state->cm_has_degamma = cur->cm_has_degamma;
4455 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4456 
4457 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
4458 
4459 	return &state->base;
4460 }
4461 
4462 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4463 {
4464 	enum dc_irq_source irq_source;
4465 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4466 	struct amdgpu_device *adev = crtc->dev->dev_private;
4467 	int rc;
4468 
4469 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4470 
4471 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4472 
4473 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4474 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4475 	return rc;
4476 }
4477 
4478 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4479 {
4480 	enum dc_irq_source irq_source;
4481 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4482 	struct amdgpu_device *adev = crtc->dev->dev_private;
4483 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4484 	int rc = 0;
4485 
4486 	if (enable) {
4487 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4488 		if (amdgpu_dm_vrr_active(acrtc_state))
4489 			rc = dm_set_vupdate_irq(crtc, true);
4490 	} else {
4491 		/* vblank irq off -> vupdate irq off */
4492 		rc = dm_set_vupdate_irq(crtc, false);
4493 	}
4494 
4495 	if (rc)
4496 		return rc;
4497 
4498 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4499 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4500 }
4501 
/* drm_crtc_funcs.enable_vblank: thin wrapper over dm_set_vblank(). */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
4506 
/* drm_crtc_funcs.disable_vblank: thin wrapper over dm_set_vblank(). */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
4511 
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
4529 
4530 static enum drm_connector_status
4531 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4532 {
4533 	bool connected;
4534 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4535 
4536 	/*
4537 	 * Notes:
4538 	 * 1. This interface is NOT called in context of HPD irq.
4539 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4540 	 * makes it a bad place for *any* MST-related activity.
4541 	 */
4542 
4543 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4544 	    !aconnector->fake_enable)
4545 		connected = (aconnector->dc_sink != NULL);
4546 	else
4547 		connected = (aconnector->base.force == DRM_FORCE_ON);
4548 
4549 	return (connected ? connector_status_connected :
4550 			connector_status_disconnected);
4551 }
4552 
4553 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4554 					    struct drm_connector_state *connector_state,
4555 					    struct drm_property *property,
4556 					    uint64_t val)
4557 {
4558 	struct drm_device *dev = connector->dev;
4559 	struct amdgpu_device *adev = dev->dev_private;
4560 	struct dm_connector_state *dm_old_state =
4561 		to_dm_connector_state(connector->state);
4562 	struct dm_connector_state *dm_new_state =
4563 		to_dm_connector_state(connector_state);
4564 
4565 	int ret = -EINVAL;
4566 
4567 	if (property == dev->mode_config.scaling_mode_property) {
4568 		enum amdgpu_rmx_type rmx_type;
4569 
4570 		switch (val) {
4571 		case DRM_MODE_SCALE_CENTER:
4572 			rmx_type = RMX_CENTER;
4573 			break;
4574 		case DRM_MODE_SCALE_ASPECT:
4575 			rmx_type = RMX_ASPECT;
4576 			break;
4577 		case DRM_MODE_SCALE_FULLSCREEN:
4578 			rmx_type = RMX_FULL;
4579 			break;
4580 		case DRM_MODE_SCALE_NONE:
4581 		default:
4582 			rmx_type = RMX_OFF;
4583 			break;
4584 		}
4585 
4586 		if (dm_old_state->scaling == rmx_type)
4587 			return 0;
4588 
4589 		dm_new_state->scaling = rmx_type;
4590 		ret = 0;
4591 	} else if (property == adev->mode_info.underscan_hborder_property) {
4592 		dm_new_state->underscan_hborder = val;
4593 		ret = 0;
4594 	} else if (property == adev->mode_info.underscan_vborder_property) {
4595 		dm_new_state->underscan_vborder = val;
4596 		ret = 0;
4597 	} else if (property == adev->mode_info.underscan_property) {
4598 		dm_new_state->underscan_enable = val;
4599 		ret = 0;
4600 	} else if (property == adev->mode_info.abm_level_property) {
4601 		dm_new_state->abm_level = val;
4602 		ret = 0;
4603 	}
4604 
4605 	return ret;
4606 }
4607 
4608 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4609 					    const struct drm_connector_state *state,
4610 					    struct drm_property *property,
4611 					    uint64_t *val)
4612 {
4613 	struct drm_device *dev = connector->dev;
4614 	struct amdgpu_device *adev = dev->dev_private;
4615 	struct dm_connector_state *dm_state =
4616 		to_dm_connector_state(state);
4617 	int ret = -EINVAL;
4618 
4619 	if (property == dev->mode_config.scaling_mode_property) {
4620 		switch (dm_state->scaling) {
4621 		case RMX_CENTER:
4622 			*val = DRM_MODE_SCALE_CENTER;
4623 			break;
4624 		case RMX_ASPECT:
4625 			*val = DRM_MODE_SCALE_ASPECT;
4626 			break;
4627 		case RMX_FULL:
4628 			*val = DRM_MODE_SCALE_FULLSCREEN;
4629 			break;
4630 		case RMX_OFF:
4631 		default:
4632 			*val = DRM_MODE_SCALE_NONE;
4633 			break;
4634 		}
4635 		ret = 0;
4636 	} else if (property == adev->mode_info.underscan_hborder_property) {
4637 		*val = dm_state->underscan_hborder;
4638 		ret = 0;
4639 	} else if (property == adev->mode_info.underscan_vborder_property) {
4640 		*val = dm_state->underscan_vborder;
4641 		ret = 0;
4642 	} else if (property == adev->mode_info.underscan_property) {
4643 		*val = dm_state->underscan_enable;
4644 		ret = 0;
4645 	} else if (property == adev->mode_info.abm_level_property) {
4646 		*val = dm_state->abm_level;
4647 		ret = 0;
4648 	}
4649 
4650 	return ret;
4651 }
4652 
/*
 * drm_connector_funcs.early_unregister: drop the DP AUX device node.
 *
 * NOTE(review): this runs for all connector types, while registration in
 * amdgpu_dm_connector_late_register() only happens for DP/eDP —
 * presumably unregistering a never-registered aux is safe; confirm.
 */
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
4659 
/*
 * drm_connector_funcs.destroy: release everything the connector owns —
 * backlight device (internal panels), emulated and real DC sinks,
 * CEC/DP helpers, the i2c adapter, and finally the connector itself.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* Unregister the backlight only for connected eDP/LVDS panels. */
	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	/* Drop sink references and clear the pointers to avoid stale use. */
	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	/* The i2c adapter only exists for connectors created with one. */
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
4696 
4697 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4698 {
4699 	struct dm_connector_state *state =
4700 		to_dm_connector_state(connector->state);
4701 
4702 	if (connector->state)
4703 		__drm_atomic_helper_connector_destroy_state(connector->state);
4704 
4705 	kfree(state);
4706 
4707 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4708 
4709 	if (state) {
4710 		state->scaling = RMX_OFF;
4711 		state->underscan_enable = false;
4712 		state->underscan_hborder = 0;
4713 		state->underscan_vborder = 0;
4714 		state->base.max_requested_bpc = 8;
4715 		state->vcpi_slots = 0;
4716 		state->pbn = 0;
4717 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4718 			state->abm_level = amdgpu_dm_abm_level;
4719 
4720 		__drm_atomic_helper_connector_reset(connector, &state->base);
4721 	}
4722 }
4723 
4724 struct drm_connector_state *
4725 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4726 {
4727 	struct dm_connector_state *state =
4728 		to_dm_connector_state(connector->state);
4729 
4730 	struct dm_connector_state *new_state =
4731 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4732 
4733 	if (!new_state)
4734 		return NULL;
4735 
4736 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4737 
4738 	new_state->freesync_capable = state->freesync_capable;
4739 	new_state->abm_level = state->abm_level;
4740 	new_state->scaling = state->scaling;
4741 	new_state->underscan_enable = state->underscan_enable;
4742 	new_state->underscan_hborder = state->underscan_hborder;
4743 	new_state->underscan_vborder = state->underscan_vborder;
4744 	new_state->vcpi_slots = state->vcpi_slots;
4745 	new_state->pbn = state->pbn;
4746 	return &new_state->base;
4747 }
4748 
/*
 * drm_connector_funcs.late_register: register the DP AUX device node (for
 * DP and eDP connectors) and create the connector's debugfs entries.
 */
static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
#if defined(CONFIG_DEBUG_FS)
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	/* Only DP and eDP connectors expose an AUX channel. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}
4770 
/* DRM connector callbacks; only the hooks this driver needs are set. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
4783 
/* Retrieve the connector's mode list via the DM-specific implementation. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
4788 
4789 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4790 {
4791 	struct dc_sink_init_data init_params = {
4792 			.link = aconnector->dc_link,
4793 			.sink_signal = SIGNAL_TYPE_VIRTUAL
4794 	};
4795 	struct edid *edid;
4796 
4797 	if (!aconnector->base.edid_blob_ptr) {
4798 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4799 				aconnector->base.name);
4800 
4801 		aconnector->base.force = DRM_FORCE_OFF;
4802 		aconnector->base.override_edid = false;
4803 		return;
4804 	}
4805 
4806 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4807 
4808 	aconnector->edid = edid;
4809 
4810 	aconnector->dc_em_sink = dc_link_add_remote_sink(
4811 		aconnector->dc_link,
4812 		(uint8_t *)edid,
4813 		(edid->extensions + 1) * EDID_LENGTH,
4814 		&init_params);
4815 
4816 	if (aconnector->base.force == DRM_FORCE_ON) {
4817 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
4818 		aconnector->dc_link->local_sink :
4819 		aconnector->dc_em_sink;
4820 		dc_sink_retain(aconnector->dc_sink);
4821 	}
4822 }
4823 
4824 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4825 {
4826 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4827 
4828 	/*
4829 	 * In case of headless boot with force on for DP managed connector
4830 	 * Those settings have to be != 0 to get initial modeset
4831 	 */
4832 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4833 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4834 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4835 	}
4836 
4837 
4838 	aconnector->base.override_edid = true;
4839 	create_eml_sink(aconnector);
4840 }
4841 
/*
 * drm_connector_helper_funcs.mode_valid: accept a mode only if DC can
 * validate a stream built for it on this connector's sink.
 */
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	enum dc_status dc_result = DC_OK;

	/* Interlaced and doublescan modes are rejected outright. */
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
		!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	/* Build a throw-away stream just to ask DC whether the mode works. */
	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto fail;
	}

	dc_result = dc_validate_stream(adev->dm.dc, stream);

	if (dc_result == DC_OK)
		result = MODE_OK;
	else
		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
			      mode->hdisplay,
			      mode->vdisplay,
			      mode->clock,
			      dc_result);

	dc_stream_release(stream);

fail:
	/* TODO: error handling*/
	return result;
}
4895 
/*
 * Pack the HDR static metadata from the connector state into a DC info
 * packet: an HDMI DRM (Dynamic Range and Mastering) infoframe for HDMI, or
 * an SDP for DP/eDP.
 *
 * Returns 0 on success; out->valid is set only when metadata was present
 * and packed. Returns a negative errno on packing failure, unexpected
 * infoframe length, or unsupported connector type.
 */
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	/* No HDR metadata attached: leave the packet zeroed/invalid. */
	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. Header layout differs per protocol. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	/* Copy the 26-byte metadata payload past the packed 4-byte header. */
	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
4954 
4955 static bool
4956 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4957 			  const struct drm_connector_state *new_state)
4958 {
4959 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4960 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4961 
4962 	if (old_blob != new_blob) {
4963 		if (old_blob && new_blob &&
4964 		    old_blob->length == new_blob->length)
4965 			return memcmp(old_blob->data, new_blob->data,
4966 				      old_blob->length);
4967 
4968 		return true;
4969 	}
4970 
4971 	return false;
4972 }
4973 
4974 static int
4975 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4976 				 struct drm_atomic_state *state)
4977 {
4978 	struct drm_connector_state *new_con_state =
4979 		drm_atomic_get_new_connector_state(state, conn);
4980 	struct drm_connector_state *old_con_state =
4981 		drm_atomic_get_old_connector_state(state, conn);
4982 	struct drm_crtc *crtc = new_con_state->crtc;
4983 	struct drm_crtc_state *new_crtc_state;
4984 	int ret;
4985 
4986 	if (!crtc)
4987 		return 0;
4988 
4989 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4990 		struct dc_info_packet hdr_infopacket;
4991 
4992 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4993 		if (ret)
4994 			return ret;
4995 
4996 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4997 		if (IS_ERR(new_crtc_state))
4998 			return PTR_ERR(new_crtc_state);
4999 
5000 		/*
5001 		 * DC considers the stream backends changed if the
5002 		 * static metadata changes. Forcing the modeset also
5003 		 * gives a simple way for userspace to switch from
5004 		 * 8bpc to 10bpc when setting the metadata to enter
5005 		 * or exit HDR.
5006 		 *
5007 		 * Changing the static metadata after it's been
5008 		 * set is permissible, however. So only force a
5009 		 * modeset if we're entering or exiting HDR.
5010 		 */
5011 		new_crtc_state->mode_changed =
5012 			!old_con_state->hdr_output_metadata ||
5013 			!new_con_state->hdr_output_metadata;
5014 	}
5015 
5016 	return 0;
5017 }
5018 
/* Connector probe/validation hooks wired into the DRM helper framework. */
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
	 * modes will be filtered by drm_mode_validate_size(), and those modes
	 * are missing after user start lightdm. So we need to renew modes list.
	 * in get_modes call back, not just return the modes count
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};
5031 
/* Intentionally empty stub for the CRTC helper .disable hook. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
5035 
5036 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5037 {
5038 	struct drm_device *dev = new_crtc_state->crtc->dev;
5039 	struct drm_plane *plane;
5040 
5041 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5042 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5043 			return true;
5044 	}
5045 
5046 	return false;
5047 }
5048 
5049 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5050 {
5051 	struct drm_atomic_state *state = new_crtc_state->state;
5052 	struct drm_plane *plane;
5053 	int num_active = 0;
5054 
5055 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5056 		struct drm_plane_state *new_plane_state;
5057 
5058 		/* Cursor planes are "fake". */
5059 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5060 			continue;
5061 
5062 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5063 
5064 		if (!new_plane_state) {
5065 			/*
5066 			 * The plane is enable on the CRTC and hasn't changed
5067 			 * state. This means that it previously passed
5068 			 * validation and is therefore enabled.
5069 			 */
5070 			num_active += 1;
5071 			continue;
5072 		}
5073 
5074 		/* We need a framebuffer to be considered enabled. */
5075 		num_active += (new_plane_state->fb != NULL);
5076 	}
5077 
5078 	return num_active;
5079 }
5080 
5081 /*
5082  * Sets whether interrupts should be enabled on a specific CRTC.
5083  * We require that the stream be enabled and that there exist active
5084  * DC planes on the stream.
5085  */
static void
dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
			       struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	/* Default: no stream means no active planes and no interrupts. */
	dm_new_crtc_state->active_planes = 0;
	dm_new_crtc_state->interrupts_enabled = false;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);

	/* Interrupts are only worthwhile with at least one real plane lit. */
	dm_new_crtc_state->interrupts_enabled =
		dm_new_crtc_state->active_planes > 0;
}
5105 
/*
 * CRTC atomic check: refresh the interrupt/active-plane bookkeeping, reject
 * a cursor-only configuration, and validate the DC stream.
 *
 * Returns 0 on success, -EINVAL when validation fails or when an active
 * CRTC would show only a cursor with no underlying hardware plane.
 */
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	/*
	 * Update interrupt state for the CRTC. This needs to happen whenever
	 * the CRTC has changed or whenever any of its planes have changed.
	 * Atomic check satisfies both of these requirements since the CRTC
	 * is added to the state by DRM during drm_atomic_helper_check_planes.
	 */
	dm_update_crtc_interrupt_state(crtc, state);

	/* A modeset without a stream is an internal inconsistency. */
	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	/*
	 * We want at least one hardware plane enabled to use
	 * the stream with a cursor enabled.
	 */
	if (state->enable && state->active &&
	    does_crtc_have_active_cursor(state) &&
	    dm_crtc_state->active_planes == 0)
		return -EINVAL;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}
5146 
/*
 * Accept every mode unchanged; mode validation happens through DC (see
 * dc_validate_stream() in the atomic_check path), not via fixup.
 */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
5153 
/* CRTC helper hooks: DC-backed atomic check, no-op disable/fixup. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
5160 
/* Intentionally empty stub for the encoder helper .disable hook. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
5165 
5166 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5167 {
5168 	switch (display_color_depth) {
5169 		case COLOR_DEPTH_666:
5170 			return 6;
5171 		case COLOR_DEPTH_888:
5172 			return 8;
5173 		case COLOR_DEPTH_101010:
5174 			return 10;
5175 		case COLOR_DEPTH_121212:
5176 			return 12;
5177 		case COLOR_DEPTH_141414:
5178 			return 14;
5179 		case COLOR_DEPTH_161616:
5180 			return 16;
5181 		default:
5182 			break;
5183 		}
5184 	return 0;
5185 }
5186 
/*
 * Encoder atomic check for MST connectors: compute the payload bandwidth
 * (PBN) for the adjusted mode and reserve VCPI timeslots in the MST atomic
 * state. Non-MST connectors (no port) and sink-less connectors pass through.
 *
 * Returns 0 on success or the negative error from VCPI allocation.
 */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	/* Only MST connectors have a port; nothing to do otherwise. */
	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	/*
	 * Skip recomputing PBN for duplicated (suspend/resume restore)
	 * states; the previously computed value is reused below.
	 */
	if (!state->duplicated) {
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector, conn_state,
								    is_y420);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   0);
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}
5231 
/* Encoder helper hooks: MST bandwidth check, no-op disable. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
5236 
5237 #if defined(CONFIG_DRM_AMD_DC_DCN)
5238 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5239 					    struct dc_state *dc_state)
5240 {
5241 	struct dc_stream_state *stream = NULL;
5242 	struct drm_connector *connector;
5243 	struct drm_connector_state *new_con_state, *old_con_state;
5244 	struct amdgpu_dm_connector *aconnector;
5245 	struct dm_connector_state *dm_conn_state;
5246 	int i, j, clock, bpp;
5247 	int vcpi, pbn_div, pbn = 0;
5248 
5249 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5250 
5251 		aconnector = to_amdgpu_dm_connector(connector);
5252 
5253 		if (!aconnector->port)
5254 			continue;
5255 
5256 		if (!new_con_state || !new_con_state->crtc)
5257 			continue;
5258 
5259 		dm_conn_state = to_dm_connector_state(new_con_state);
5260 
5261 		for (j = 0; j < dc_state->stream_count; j++) {
5262 			stream = dc_state->streams[j];
5263 			if (!stream)
5264 				continue;
5265 
5266 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5267 				break;
5268 
5269 			stream = NULL;
5270 		}
5271 
5272 		if (!stream)
5273 			continue;
5274 
5275 		if (stream->timing.flags.DSC != 1) {
5276 			drm_dp_mst_atomic_enable_dsc(state,
5277 						     aconnector->port,
5278 						     dm_conn_state->pbn,
5279 						     0,
5280 						     false);
5281 			continue;
5282 		}
5283 
5284 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5285 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5286 		clock = stream->timing.pix_clk_100hz / 10;
5287 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5288 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5289 						    aconnector->port,
5290 						    pbn, pbn_div,
5291 						    true);
5292 		if (vcpi < 0)
5293 			return vcpi;
5294 
5295 		dm_conn_state->pbn = pbn;
5296 		dm_conn_state->vcpi_slots = vcpi;
5297 	}
5298 	return 0;
5299 }
5300 #endif
5301 
5302 static void dm_drm_plane_reset(struct drm_plane *plane)
5303 {
5304 	struct dm_plane_state *amdgpu_state = NULL;
5305 
5306 	if (plane->state)
5307 		plane->funcs->atomic_destroy_state(plane, plane->state);
5308 
5309 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5310 	WARN_ON(amdgpu_state == NULL);
5311 
5312 	if (amdgpu_state)
5313 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5314 }
5315 
5316 static struct drm_plane_state *
5317 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5318 {
5319 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5320 
5321 	old_dm_plane_state = to_dm_plane_state(plane->state);
5322 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5323 	if (!dm_plane_state)
5324 		return NULL;
5325 
5326 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5327 
5328 	if (old_dm_plane_state->dc_state) {
5329 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5330 		dc_plane_state_retain(dm_plane_state->dc_state);
5331 	}
5332 
5333 	return &dm_plane_state->base;
5334 }
5335 
5336 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5337 				struct drm_plane_state *state)
5338 {
5339 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5340 
5341 	if (dm_plane_state->dc_state)
5342 		dc_plane_state_release(dm_plane_state->dc_state);
5343 
5344 	drm_atomic_helper_plane_destroy_state(plane, state);
5345 }
5346 
/* Plane funcs: standard atomic helpers plus DM-specific state lifecycle. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
5355 
/*
 * Prepare the framebuffer for scanout: reserve the backing BO, pin it in a
 * scanout-capable domain, bind it into GART, read back tiling/TMZ info, and
 * (when the DC plane state changed) refresh the DC buffer attributes.
 *
 * Takes a reference on the BO; released in dm_plane_helper_cleanup_fb().
 * Returns 0 on success or a negative errno from reservation/pin/bind.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint64_t tiling_flags;
	uint32_t domain;
	int r;
	bool tmz_surface = false;
	bool force_disable_dcc = false;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	/* Disabling the plane: nothing to pin. */
	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	/* Reserve the BO through the TTM execbuf utilities. */
	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	/* Cursors must live in VRAM; other planes use any scanout domain. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		/* -ERESTARTSYS is an expected interruption, not an error. */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	/* Read tiling/encryption info while the BO is still reserved. */
	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);

	tmz_surface = amdgpu_bo_encrypted(rbo);

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	/* Hold a reference for the lifetime of the pinned FB. */
	amdgpu_bo_ref(rbo);

	if (dm_plane_state_new->dc_state &&
			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		/* NOTE(review): DCC is disabled on Raven during suspend here
		 * — presumably a workaround; confirm before relying on it. */
		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			tiling_flags, &plane_state->tiling_info,
			&plane_state->plane_size, &plane_state->dcc,
			&plane_state->address, tmz_surface,
			force_disable_dcc);
	}

	return 0;
}
5443 
/*
 * Undo dm_plane_helper_prepare_fb(): unpin the BO and drop the reference
 * taken at prepare time. Safe to call when no FB was bound.
 */
static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	/* Nothing was pinned if no framebuffer was bound. */
	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}
5464 
5465 static int dm_plane_atomic_check(struct drm_plane *plane,
5466 				 struct drm_plane_state *state)
5467 {
5468 	struct amdgpu_device *adev = plane->dev->dev_private;
5469 	struct dc *dc = adev->dm.dc;
5470 	struct dm_plane_state *dm_plane_state;
5471 	struct dc_scaling_info scaling_info;
5472 	int ret;
5473 
5474 	dm_plane_state = to_dm_plane_state(state);
5475 
5476 	if (!dm_plane_state->dc_state)
5477 		return 0;
5478 
5479 	ret = fill_dc_scaling_info(state, &scaling_info);
5480 	if (ret)
5481 		return ret;
5482 
5483 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5484 		return 0;
5485 
5486 	return -EINVAL;
5487 }
5488 
/*
 * Gate async (fast, non-vblank-synchronized) plane updates: only cursor
 * planes are eligible.
 */
static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_plane_state *new_plane_state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}
5498 
5499 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5500 					 struct drm_plane_state *new_state)
5501 {
5502 	struct drm_plane_state *old_state =
5503 		drm_atomic_get_old_plane_state(new_state->state, plane);
5504 
5505 	swap(plane->state->fb, new_state->fb);
5506 
5507 	plane->state->src_x = new_state->src_x;
5508 	plane->state->src_y = new_state->src_y;
5509 	plane->state->src_w = new_state->src_w;
5510 	plane->state->src_h = new_state->src_h;
5511 	plane->state->crtc_x = new_state->crtc_x;
5512 	plane->state->crtc_y = new_state->crtc_y;
5513 	plane->state->crtc_w = new_state->crtc_w;
5514 	plane->state->crtc_h = new_state->crtc_h;
5515 
5516 	handle_cursor_update(plane, old_state);
5517 }
5518 
/* Plane helper hooks: FB pin/unpin, DC validation, async cursor updates. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
5526 
5527 /*
5528  * TODO: these are currently initialized to rgb formats only.
5529  * For future use cases we should either initialize them dynamically based on
5530  * plane capabilities, or initialize this array to all formats, so internal drm
5531  * check will succeed, and let DC implement proper check
5532  */
/* Base RGB formats advertised on primary planes (see get_plane_formats). */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};
5545 
/* Formats advertised on overlay planes (8-bit RGB variants + RGB565). */
static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};
5554 
/* Cursor planes support only pre-multiplied-alpha-free ARGB8888. */
static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
5558 
5559 static int get_plane_formats(const struct drm_plane *plane,
5560 			     const struct dc_plane_cap *plane_cap,
5561 			     uint32_t *formats, int max_formats)
5562 {
5563 	int i, num_formats = 0;
5564 
5565 	/*
5566 	 * TODO: Query support for each group of formats directly from
5567 	 * DC plane caps. This will require adding more formats to the
5568 	 * caps list.
5569 	 */
5570 
5571 	switch (plane->type) {
5572 	case DRM_PLANE_TYPE_PRIMARY:
5573 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5574 			if (num_formats >= max_formats)
5575 				break;
5576 
5577 			formats[num_formats++] = rgb_formats[i];
5578 		}
5579 
5580 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5581 			formats[num_formats++] = DRM_FORMAT_NV12;
5582 		if (plane_cap && plane_cap->pixel_format_support.p010)
5583 			formats[num_formats++] = DRM_FORMAT_P010;
5584 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
5585 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5586 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5587 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5588 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5589 		}
5590 		break;
5591 
5592 	case DRM_PLANE_TYPE_OVERLAY:
5593 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5594 			if (num_formats >= max_formats)
5595 				break;
5596 
5597 			formats[num_formats++] = overlay_formats[i];
5598 		}
5599 		break;
5600 
5601 	case DRM_PLANE_TYPE_CURSOR:
5602 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5603 			if (num_formats >= max_formats)
5604 				break;
5605 
5606 			formats[num_formats++] = cursor_formats[i];
5607 		}
5608 		break;
5609 	}
5610 
5611 	return num_formats;
5612 }
5613 
/*
 * Initialize a DRM plane: register it with its supported formats, attach
 * alpha/blend properties (overlays with per-pixel alpha), color-encoding
 * properties (primaries with YUV support), the DM helper funcs, and an
 * initial reset state.
 *
 * Returns 0 on success or the error from drm_universal_plane_init().
 */
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       NULL, plane->type, NULL);
	if (res)
		return res;

	/* Overlays with per-pixel alpha get alpha/blend-mode properties. */
	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}
5664 
5665 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5666 			       struct drm_plane *plane,
5667 			       uint32_t crtc_index)
5668 {
5669 	struct amdgpu_crtc *acrtc = NULL;
5670 	struct drm_plane *cursor_plane;
5671 
5672 	int res = -ENOMEM;
5673 
5674 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5675 	if (!cursor_plane)
5676 		goto fail;
5677 
5678 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5679 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5680 
5681 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5682 	if (!acrtc)
5683 		goto fail;
5684 
5685 	res = drm_crtc_init_with_planes(
5686 			dm->ddev,
5687 			&acrtc->base,
5688 			plane,
5689 			cursor_plane,
5690 			&amdgpu_dm_crtc_funcs, NULL);
5691 
5692 	if (res)
5693 		goto fail;
5694 
5695 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5696 
5697 	/* Create (reset) the plane state */
5698 	if (acrtc->base.funcs->reset)
5699 		acrtc->base.funcs->reset(&acrtc->base);
5700 
5701 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5702 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5703 
5704 	acrtc->crtc_id = crtc_index;
5705 	acrtc->base.enabled = false;
5706 	acrtc->otg_inst = -1;
5707 
5708 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5709 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5710 				   true, MAX_COLOR_LUT_ENTRIES);
5711 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5712 
5713 	return 0;
5714 
5715 fail:
5716 	kfree(acrtc);
5717 	kfree(cursor_plane);
5718 	return res;
5719 }
5720 
5721 
5722 static int to_drm_connector_type(enum signal_type st)
5723 {
5724 	switch (st) {
5725 	case SIGNAL_TYPE_HDMI_TYPE_A:
5726 		return DRM_MODE_CONNECTOR_HDMIA;
5727 	case SIGNAL_TYPE_EDP:
5728 		return DRM_MODE_CONNECTOR_eDP;
5729 	case SIGNAL_TYPE_LVDS:
5730 		return DRM_MODE_CONNECTOR_LVDS;
5731 	case SIGNAL_TYPE_RGB:
5732 		return DRM_MODE_CONNECTOR_VGA;
5733 	case SIGNAL_TYPE_DISPLAY_PORT:
5734 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5735 		return DRM_MODE_CONNECTOR_DisplayPort;
5736 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5737 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5738 		return DRM_MODE_CONNECTOR_DVID;
5739 	case SIGNAL_TYPE_VIRTUAL:
5740 		return DRM_MODE_CONNECTOR_VIRTUAL;
5741 
5742 	default:
5743 		return DRM_MODE_CONNECTOR_Unknown;
5744 	}
5745 }
5746 
/*
 * Return the encoder attached to @connector, or NULL if none. The loop
 * body returns on the first iteration because each connector has exactly
 * one possible encoder in this driver.
 */
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}
5757 
/*
 * Cache the connector's preferred probed mode as the encoder's native mode.
 * Resets native_mode.clock to 0 first so callers can tell when no preferred
 * mode was found.
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			/*
			 * NOTE(review): this break is unconditional, so only
			 * the FIRST probed mode is ever examined. Presumably
			 * this relies on drm_mode_sort() placing the
			 * preferred mode first — verify that assumption.
			 */
			break;
		}

	}
}
5786 
5787 static struct drm_display_mode *
5788 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5789 			     char *name,
5790 			     int hdisplay, int vdisplay)
5791 {
5792 	struct drm_device *dev = encoder->dev;
5793 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5794 	struct drm_display_mode *mode = NULL;
5795 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5796 
5797 	mode = drm_mode_duplicate(dev, native_mode);
5798 
5799 	if (mode == NULL)
5800 		return NULL;
5801 
5802 	mode->hdisplay = hdisplay;
5803 	mode->vdisplay = vdisplay;
5804 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5805 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5806 
5807 	return mode;
5808 
5809 }
5810 
5811 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5812 						 struct drm_connector *connector)
5813 {
5814 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5815 	struct drm_display_mode *mode = NULL;
5816 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5817 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5818 				to_amdgpu_dm_connector(connector);
5819 	int i;
5820 	int n;
5821 	struct mode_size {
5822 		char name[DRM_DISPLAY_MODE_LEN];
5823 		int w;
5824 		int h;
5825 	} common_modes[] = {
5826 		{  "640x480",  640,  480},
5827 		{  "800x600",  800,  600},
5828 		{ "1024x768", 1024,  768},
5829 		{ "1280x720", 1280,  720},
5830 		{ "1280x800", 1280,  800},
5831 		{"1280x1024", 1280, 1024},
5832 		{ "1440x900", 1440,  900},
5833 		{"1680x1050", 1680, 1050},
5834 		{"1600x1200", 1600, 1200},
5835 		{"1920x1080", 1920, 1080},
5836 		{"1920x1200", 1920, 1200}
5837 	};
5838 
5839 	n = ARRAY_SIZE(common_modes);
5840 
5841 	for (i = 0; i < n; i++) {
5842 		struct drm_display_mode *curmode = NULL;
5843 		bool mode_existed = false;
5844 
5845 		if (common_modes[i].w > native_mode->hdisplay ||
5846 		    common_modes[i].h > native_mode->vdisplay ||
5847 		   (common_modes[i].w == native_mode->hdisplay &&
5848 		    common_modes[i].h == native_mode->vdisplay))
5849 			continue;
5850 
5851 		list_for_each_entry(curmode, &connector->probed_modes, head) {
5852 			if (common_modes[i].w == curmode->hdisplay &&
5853 			    common_modes[i].h == curmode->vdisplay) {
5854 				mode_existed = true;
5855 				break;
5856 			}
5857 		}
5858 
5859 		if (mode_existed)
5860 			continue;
5861 
5862 		mode = amdgpu_dm_create_common_mode(encoder,
5863 				common_modes[i].name, common_modes[i].w,
5864 				common_modes[i].h);
5865 		drm_mode_probed_add(connector, mode);
5866 		amdgpu_dm_connector->num_modes++;
5867 	}
5868 }
5869 
5870 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5871 					      struct edid *edid)
5872 {
5873 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5874 			to_amdgpu_dm_connector(connector);
5875 
5876 	if (edid) {
5877 		/* empty probed_modes */
5878 		INIT_LIST_HEAD(&connector->probed_modes);
5879 		amdgpu_dm_connector->num_modes =
5880 				drm_add_edid_modes(connector, edid);
5881 
5882 		/* sorting the probed modes before calling function
5883 		 * amdgpu_dm_get_native_mode() since EDID can have
5884 		 * more than one preferred mode. The modes that are
5885 		 * later in the probed mode list could be of higher
5886 		 * and preferred resolution. For example, 3840x2160
5887 		 * resolution in base EDID preferred timing and 4096x2160
5888 		 * preferred resolution in DID extension block later.
5889 		 */
5890 		drm_mode_sort(&connector->probed_modes);
5891 		amdgpu_dm_get_native_mode(connector);
5892 	} else {
5893 		amdgpu_dm_connector->num_modes = 0;
5894 	}
5895 }
5896 
5897 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5898 {
5899 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5900 			to_amdgpu_dm_connector(connector);
5901 	struct drm_encoder *encoder;
5902 	struct edid *edid = amdgpu_dm_connector->edid;
5903 
5904 	encoder = amdgpu_dm_connector_to_encoder(connector);
5905 
5906 	if (!edid || !drm_edid_is_valid(edid)) {
5907 		amdgpu_dm_connector->num_modes =
5908 				drm_add_modes_noedid(connector, 640, 480);
5909 	} else {
5910 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
5911 		amdgpu_dm_connector_add_common_modes(encoder, connector);
5912 	}
5913 	amdgpu_dm_fbc_init(connector);
5914 
5915 	return amdgpu_dm_connector->num_modes;
5916 }
5917 
/**
 * amdgpu_dm_connector_init_helper - finish initializing a DM connector
 * @dm: display manager that owns the connector
 * @aconnector: connector to initialize
 * @connector_type: DRM_MODE_CONNECTOR_* type for this connector
 * @link: dc link this connector represents
 * @link_index: index of @link; reused as the connector id
 *
 * Sets connector defaults (dpms off; no interlace, doublescan or stereo),
 * configures HPD polling and ycbcr420 support per connector type, and
 * attaches the scaling, underscan, max-bpc, ABM, HDR metadata, VRR and
 * content-protection properties where applicable.
 */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	/* MST connectors get max_bpc from their port instead. */
	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	/* ABM (backlight modulation) only applies to eDP with an initialized DMCU. */
	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    dc_is_dmcu_initialized(adev->dm.dc)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	/* HDR metadata, VRR and HDCP only make sense on HDMI/DP/eDP. */
	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
6008 
6009 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6010 			      struct i2c_msg *msgs, int num)
6011 {
6012 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6013 	struct ddc_service *ddc_service = i2c->ddc_service;
6014 	struct i2c_command cmd;
6015 	int i;
6016 	int result = -EIO;
6017 
6018 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6019 
6020 	if (!cmd.payloads)
6021 		return result;
6022 
6023 	cmd.number_of_payloads = num;
6024 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6025 	cmd.speed = 100;
6026 
6027 	for (i = 0; i < num; i++) {
6028 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6029 		cmd.payloads[i].address = msgs[i].addr;
6030 		cmd.payloads[i].length = msgs[i].len;
6031 		cmd.payloads[i].data = msgs[i].buf;
6032 	}
6033 
6034 	if (dc_submit_i2c(
6035 			ddc_service->ctx->dc,
6036 			ddc_service->ddc_pin->hw_info.ddc_channel,
6037 			&cmd))
6038 		result = num;
6039 
6040 	kfree(cmd.payloads);
6041 	return result;
6042 }
6043 
/* Advertise plain I2C transfers plus SMBus emulation for the DDC adapter. */
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
6048 
/* i2c algorithm hooks used by the DM-created hardware DDC adapters. */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
6053 
6054 static struct amdgpu_i2c_adapter *
6055 create_i2c(struct ddc_service *ddc_service,
6056 	   int link_index,
6057 	   int *res)
6058 {
6059 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6060 	struct amdgpu_i2c_adapter *i2c;
6061 
6062 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6063 	if (!i2c)
6064 		return NULL;
6065 	i2c->base.owner = THIS_MODULE;
6066 	i2c->base.class = I2C_CLASS_DDC;
6067 	i2c->base.dev.parent = &adev->pdev->dev;
6068 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6069 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6070 	i2c_set_adapdata(&i2c->base, i2c);
6071 	i2c->ddc_service = ddc_service;
6072 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6073 
6074 	return i2c;
6075 }
6076 
6077 
6078 /*
6079  * Note: this function assumes that dc_link_detect() was called for the
6080  * dc_link which will be represented by this aconnector.
6081  */
6082 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6083 				    struct amdgpu_dm_connector *aconnector,
6084 				    uint32_t link_index,
6085 				    struct amdgpu_encoder *aencoder)
6086 {
6087 	int res = 0;
6088 	int connector_type;
6089 	struct dc *dc = dm->dc;
6090 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6091 	struct amdgpu_i2c_adapter *i2c;
6092 
6093 	link->priv = aconnector;
6094 
6095 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6096 
6097 	i2c = create_i2c(link->ddc, link->link_index, &res);
6098 	if (!i2c) {
6099 		DRM_ERROR("Failed to create i2c adapter data\n");
6100 		return -ENOMEM;
6101 	}
6102 
6103 	aconnector->i2c = i2c;
6104 	res = i2c_add_adapter(&i2c->base);
6105 
6106 	if (res) {
6107 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6108 		goto out_free;
6109 	}
6110 
6111 	connector_type = to_drm_connector_type(link->connector_signal);
6112 
6113 	res = drm_connector_init_with_ddc(
6114 			dm->ddev,
6115 			&aconnector->base,
6116 			&amdgpu_dm_connector_funcs,
6117 			connector_type,
6118 			&i2c->base);
6119 
6120 	if (res) {
6121 		DRM_ERROR("connector_init failed\n");
6122 		aconnector->connector_id = -1;
6123 		goto out_free;
6124 	}
6125 
6126 	drm_connector_helper_add(
6127 			&aconnector->base,
6128 			&amdgpu_dm_connector_helper_funcs);
6129 
6130 	amdgpu_dm_connector_init_helper(
6131 		dm,
6132 		aconnector,
6133 		connector_type,
6134 		link,
6135 		link_index);
6136 
6137 	drm_connector_attach_encoder(
6138 		&aconnector->base, &aencoder->base);
6139 
6140 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6141 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6142 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6143 
6144 out_free:
6145 	if (res) {
6146 		kfree(i2c);
6147 		aconnector->i2c = NULL;
6148 	}
6149 	return res;
6150 }
6151 
6152 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6153 {
6154 	switch (adev->mode_info.num_crtc) {
6155 	case 1:
6156 		return 0x1;
6157 	case 2:
6158 		return 0x3;
6159 	case 3:
6160 		return 0x7;
6161 	case 4:
6162 		return 0xf;
6163 	case 5:
6164 		return 0x1f;
6165 	case 6:
6166 	default:
6167 		return 0x3f;
6168 	}
6169 }
6170 
6171 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6172 				  struct amdgpu_encoder *aencoder,
6173 				  uint32_t link_index)
6174 {
6175 	struct amdgpu_device *adev = dev->dev_private;
6176 
6177 	int res = drm_encoder_init(dev,
6178 				   &aencoder->base,
6179 				   &amdgpu_dm_encoder_funcs,
6180 				   DRM_MODE_ENCODER_TMDS,
6181 				   NULL);
6182 
6183 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6184 
6185 	if (!res)
6186 		aencoder->encoder_id = link_index;
6187 	else
6188 		aencoder->encoder_id = -1;
6189 
6190 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6191 
6192 	return res;
6193 }
6194 
6195 static void manage_dm_interrupts(struct amdgpu_device *adev,
6196 				 struct amdgpu_crtc *acrtc,
6197 				 bool enable)
6198 {
6199 	/*
6200 	 * this is not correct translation but will work as soon as VBLANK
6201 	 * constant is the same as PFLIP
6202 	 */
6203 	int irq_type =
6204 		amdgpu_display_crtc_idx_to_irq_type(
6205 			adev,
6206 			acrtc->crtc_id);
6207 
6208 	if (enable) {
6209 		drm_crtc_vblank_on(&acrtc->base);
6210 		amdgpu_irq_get(
6211 			adev,
6212 			&adev->pageflip_irq,
6213 			irq_type);
6214 	} else {
6215 
6216 		amdgpu_irq_put(
6217 			adev,
6218 			&adev->pageflip_irq,
6219 			irq_type);
6220 		drm_crtc_vblank_off(&acrtc->base);
6221 	}
6222 }
6223 
6224 static bool
6225 is_scaling_state_different(const struct dm_connector_state *dm_state,
6226 			   const struct dm_connector_state *old_dm_state)
6227 {
6228 	if (dm_state->scaling != old_dm_state->scaling)
6229 		return true;
6230 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6231 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6232 			return true;
6233 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6234 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6235 			return true;
6236 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6237 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6238 		return true;
6239 	return false;
6240 }
6241 
6242 #ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether the HDCP state machine must be (re)run for this connector,
 * normalizing @state->content_protection along the way.
 *
 * Returns true when hdcp enable/disable work should be queued, false when
 * the transition can be ignored. Note that this function mutates
 * @state->content_protection for several transitions (type change, CP
 * re-enable, S3 resume) before deciding.
 */
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/* Content type changed while CP is wanted: force a fresh DESIRED cycle. */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
	 * hot-plug, headless s3, dpms
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
	    aconnector->dc_sink != NULL)
		return true;

	if (old_state->content_protection == state->content_protection)
		return false;

	/* CP turned off: tear down hdcp. */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}
6282 
6283 #endif
/*
 * Mark a CRTC as disabled after its stream has been removed.
 * Note: @adev and @stream are unused here.
 */
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
6293 
/*
 * Translate the cursor plane state into a dc_cursor_position.
 *
 * Negative on-screen coordinates are clamped to 0 and compensated via the
 * hotspot so the visible part of the cursor stays correct. @position->enable
 * is left false when there is no crtc/fb or the cursor is entirely off the
 * top-left edge. Returns 0 on success, -EINVAL for an oversized cursor.
 */
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	/* No crtc or no framebuffer: report a disabled cursor. */
	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	/* Entirely off the top-left edge: keep the cursor disabled. */
	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	/* Partially off-screen: clamp to 0 and shift the hotspot instead. */
	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}
6341 
/*
 * Program the hardware cursor for a cursor-plane update.
 *
 * Computes the dc cursor position from the plane state; a disabled position
 * (cursor moved fully off-screen or fb removed) just turns the cursor off.
 * Otherwise the cursor attributes (fb address, size, premultiplied-alpha
 * color format) and position are pushed to dc under dm.dc_lock.
 */
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	/* With no new fb, fall back to the old state's crtc for the disable. */
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	/* Nothing to do when neither the old nor the new state has an fb. */
	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
6405 
/*
 * Hand the pending pageflip event over to the flip interrupt handler.
 * Must be called with the device's event_lock held; the CRTC state's event
 * is consumed (set to NULL) and the flip status moves to SUBMITTED.
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
						 acrtc->crtc_id);
}
6423 
/*
 * Refresh the VRR (FreeSync) state of a stream at flip time.
 *
 * Under the device event_lock, runs the freesync module's pre-flip handling
 * (and, on pre-AI hardware with VRR active, the v_update/vmin-vmax
 * adjustment), rebuilds the VRR infopacket, records whether the timing or
 * infopacket changed, and writes the results back to both the crtc state
 * and the dc stream.
 */
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	/* Accumulate change flags; they may already be set from earlier updates. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = new_crtc_state->vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
6502 
/*
 * Recompute the VRR parameters for a stream before the commit applies.
 *
 * Derives the freesync config state (ACTIVE_VARIABLE, INACTIVE or
 * UNSUPPORTED) from the crtc's vrr support/enable flags and refresh range,
 * rebuilds mod_vrr_params, and records whether the timing changed — all
 * under the device event_lock.
 */
static void pre_update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	/* Accumulate the change flag; it may already be set from earlier updates. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
6548 
6549 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6550 					    struct dm_crtc_state *new_state)
6551 {
6552 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6553 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6554 
6555 	if (!old_vrr_active && new_vrr_active) {
6556 		/* Transition VRR inactive -> active:
6557 		 * While VRR is active, we must not disable vblank irq, as a
6558 		 * reenable after disable would compute bogus vblank/pflip
6559 		 * timestamps if it likely happened inside display front-porch.
6560 		 *
6561 		 * We also need vupdate irq for the actual core vblank handling
6562 		 * at end of vblank.
6563 		 */
6564 		dm_set_vupdate_irq(new_state->base.crtc, true);
6565 		drm_crtc_vblank_get(new_state->base.crtc);
6566 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6567 				 __func__, new_state->base.crtc->base.id);
6568 	} else if (old_vrr_active && !new_vrr_active) {
6569 		/* Transition VRR active -> inactive:
6570 		 * Allow vblank irq disable again for fixed refresh rate.
6571 		 */
6572 		dm_set_vupdate_irq(new_state->base.crtc, false);
6573 		drm_crtc_vblank_put(new_state->base.crtc);
6574 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6575 				 __func__, new_state->base.crtc->base.id);
6576 	}
6577 }
6578 
6579 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6580 {
6581 	struct drm_plane *plane;
6582 	struct drm_plane_state *old_plane_state, *new_plane_state;
6583 	int i;
6584 
6585 	/*
6586 	 * TODO: Make this per-stream so we don't issue redundant updates for
6587 	 * commits with multiple streams.
6588 	 */
6589 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6590 				       new_plane_state, i)
6591 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6592 			handle_cursor_update(plane, old_plane_state);
6593 }
6594 
6595 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6596 				    struct dc_state *dc_state,
6597 				    struct drm_device *dev,
6598 				    struct amdgpu_display_manager *dm,
6599 				    struct drm_crtc *pcrtc,
6600 				    bool wait_for_vblank)
6601 {
6602 	uint32_t i;
6603 	uint64_t timestamp_ns;
6604 	struct drm_plane *plane;
6605 	struct drm_plane_state *old_plane_state, *new_plane_state;
6606 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6607 	struct drm_crtc_state *new_pcrtc_state =
6608 			drm_atomic_get_new_crtc_state(state, pcrtc);
6609 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6610 	struct dm_crtc_state *dm_old_crtc_state =
6611 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6612 	int planes_count = 0, vpos, hpos;
6613 	long r;
6614 	unsigned long flags;
6615 	struct amdgpu_bo *abo;
6616 	uint64_t tiling_flags;
6617 	bool tmz_surface = false;
6618 	uint32_t target_vblank, last_flip_vblank;
6619 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6620 	bool pflip_present = false;
6621 	struct {
6622 		struct dc_surface_update surface_updates[MAX_SURFACES];
6623 		struct dc_plane_info plane_infos[MAX_SURFACES];
6624 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6625 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6626 		struct dc_stream_update stream_update;
6627 	} *bundle;
6628 
6629 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6630 
6631 	if (!bundle) {
6632 		dm_error("Failed to allocate update bundle\n");
6633 		goto cleanup;
6634 	}
6635 
6636 	/*
6637 	 * Disable the cursor first if we're disabling all the planes.
6638 	 * It'll remain on the screen after the planes are re-enabled
6639 	 * if we don't.
6640 	 */
6641 	if (acrtc_state->active_planes == 0)
6642 		amdgpu_dm_commit_cursors(state);
6643 
6644 	/* update planes when needed */
6645 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6646 		struct drm_crtc *crtc = new_plane_state->crtc;
6647 		struct drm_crtc_state *new_crtc_state;
6648 		struct drm_framebuffer *fb = new_plane_state->fb;
6649 		bool plane_needs_flip;
6650 		struct dc_plane_state *dc_plane;
6651 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6652 
6653 		/* Cursor plane is handled after stream updates */
6654 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6655 			continue;
6656 
6657 		if (!fb || !crtc || pcrtc != crtc)
6658 			continue;
6659 
6660 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6661 		if (!new_crtc_state->active)
6662 			continue;
6663 
6664 		dc_plane = dm_new_plane_state->dc_state;
6665 
6666 		bundle->surface_updates[planes_count].surface = dc_plane;
6667 		if (new_pcrtc_state->color_mgmt_changed) {
6668 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6669 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6670 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6671 		}
6672 
6673 		fill_dc_scaling_info(new_plane_state,
6674 				     &bundle->scaling_infos[planes_count]);
6675 
6676 		bundle->surface_updates[planes_count].scaling_info =
6677 			&bundle->scaling_infos[planes_count];
6678 
6679 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6680 
6681 		pflip_present = pflip_present || plane_needs_flip;
6682 
6683 		if (!plane_needs_flip) {
6684 			planes_count += 1;
6685 			continue;
6686 		}
6687 
6688 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6689 
6690 		/*
6691 		 * Wait for all fences on this FB. Do limited wait to avoid
6692 		 * deadlock during GPU reset when this fence will not signal
6693 		 * but we hold reservation lock for the BO.
6694 		 */
6695 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6696 							false,
6697 							msecs_to_jiffies(5000));
6698 		if (unlikely(r <= 0))
6699 			DRM_ERROR("Waiting for fences timed out!");
6700 
6701 		/*
6702 		 * TODO This might fail and hence better not used, wait
6703 		 * explicitly on fences instead
6704 		 * and in general should be called for
6705 		 * blocking commit to as per framework helpers
6706 		 */
6707 		r = amdgpu_bo_reserve(abo, true);
6708 		if (unlikely(r != 0))
6709 			DRM_ERROR("failed to reserve buffer before flip\n");
6710 
6711 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6712 
6713 		tmz_surface = amdgpu_bo_encrypted(abo);
6714 
6715 		amdgpu_bo_unreserve(abo);
6716 
6717 		fill_dc_plane_info_and_addr(
6718 			dm->adev, new_plane_state, tiling_flags,
6719 			&bundle->plane_infos[planes_count],
6720 			&bundle->flip_addrs[planes_count].address,
6721 			tmz_surface,
6722 			false);
6723 
6724 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6725 				 new_plane_state->plane->index,
6726 				 bundle->plane_infos[planes_count].dcc.enable);
6727 
6728 		bundle->surface_updates[planes_count].plane_info =
6729 			&bundle->plane_infos[planes_count];
6730 
6731 		/*
6732 		 * Only allow immediate flips for fast updates that don't
6733 		 * change FB pitch, DCC state, rotation or mirroing.
6734 		 */
6735 		bundle->flip_addrs[planes_count].flip_immediate =
6736 			crtc->state->async_flip &&
6737 			acrtc_state->update_type == UPDATE_TYPE_FAST;
6738 
6739 		timestamp_ns = ktime_get_ns();
6740 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6741 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6742 		bundle->surface_updates[planes_count].surface = dc_plane;
6743 
6744 		if (!bundle->surface_updates[planes_count].surface) {
6745 			DRM_ERROR("No surface for CRTC: id=%d\n",
6746 					acrtc_attach->crtc_id);
6747 			continue;
6748 		}
6749 
6750 		if (plane == pcrtc->primary)
6751 			update_freesync_state_on_stream(
6752 				dm,
6753 				acrtc_state,
6754 				acrtc_state->stream,
6755 				dc_plane,
6756 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6757 
6758 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6759 				 __func__,
6760 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6761 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6762 
6763 		planes_count += 1;
6764 
6765 	}
6766 
6767 	if (pflip_present) {
6768 		if (!vrr_active) {
6769 			/* Use old throttling in non-vrr fixed refresh rate mode
6770 			 * to keep flip scheduling based on target vblank counts
6771 			 * working in a backwards compatible way, e.g., for
6772 			 * clients using the GLX_OML_sync_control extension or
6773 			 * DRI3/Present extension with defined target_msc.
6774 			 */
6775 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6776 		}
6777 		else {
6778 			/* For variable refresh rate mode only:
6779 			 * Get vblank of last completed flip to avoid > 1 vrr
6780 			 * flips per video frame by use of throttling, but allow
6781 			 * flip programming anywhere in the possibly large
6782 			 * variable vrr vblank interval for fine-grained flip
6783 			 * timing control and more opportunity to avoid stutter
6784 			 * on late submission of flips.
6785 			 */
6786 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6787 			last_flip_vblank = acrtc_attach->last_flip_vblank;
6788 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6789 		}
6790 
6791 		target_vblank = last_flip_vblank + wait_for_vblank;
6792 
6793 		/*
6794 		 * Wait until we're out of the vertical blank period before the one
6795 		 * targeted by the flip
6796 		 */
6797 		while ((acrtc_attach->enabled &&
6798 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6799 							    0, &vpos, &hpos, NULL,
6800 							    NULL, &pcrtc->hwmode)
6801 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6802 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6803 			(int)(target_vblank -
6804 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6805 			usleep_range(1000, 1100);
6806 		}
6807 
6808 		if (acrtc_attach->base.state->event) {
6809 			drm_crtc_vblank_get(pcrtc);
6810 
6811 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6812 
6813 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6814 			prepare_flip_isr(acrtc_attach);
6815 
6816 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6817 		}
6818 
6819 		if (acrtc_state->stream) {
6820 			if (acrtc_state->freesync_vrr_info_changed)
6821 				bundle->stream_update.vrr_infopacket =
6822 					&acrtc_state->stream->vrr_infopacket;
6823 		}
6824 	}
6825 
6826 	/* Update the planes if changed or disable if we don't have any. */
6827 	if ((planes_count || acrtc_state->active_planes == 0) &&
6828 		acrtc_state->stream) {
6829 		bundle->stream_update.stream = acrtc_state->stream;
6830 		if (new_pcrtc_state->mode_changed) {
6831 			bundle->stream_update.src = acrtc_state->stream->src;
6832 			bundle->stream_update.dst = acrtc_state->stream->dst;
6833 		}
6834 
6835 		if (new_pcrtc_state->color_mgmt_changed) {
6836 			/*
6837 			 * TODO: This isn't fully correct since we've actually
6838 			 * already modified the stream in place.
6839 			 */
6840 			bundle->stream_update.gamut_remap =
6841 				&acrtc_state->stream->gamut_remap_matrix;
6842 			bundle->stream_update.output_csc_transform =
6843 				&acrtc_state->stream->csc_color_matrix;
6844 			bundle->stream_update.out_transfer_func =
6845 				acrtc_state->stream->out_transfer_func;
6846 		}
6847 
6848 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
6849 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6850 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
6851 
6852 		/*
6853 		 * If FreeSync state on the stream has changed then we need to
6854 		 * re-adjust the min/max bounds now that DC doesn't handle this
6855 		 * as part of commit.
6856 		 */
6857 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6858 		    amdgpu_dm_vrr_active(acrtc_state)) {
6859 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6860 			dc_stream_adjust_vmin_vmax(
6861 				dm->dc, acrtc_state->stream,
6862 				&acrtc_state->vrr_params.adjust);
6863 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6864 		}
6865 		mutex_lock(&dm->dc_lock);
6866 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6867 				acrtc_state->stream->link->psr_settings.psr_allow_active)
6868 			amdgpu_dm_psr_disable(acrtc_state->stream);
6869 
6870 		dc_commit_updates_for_stream(dm->dc,
6871 						     bundle->surface_updates,
6872 						     planes_count,
6873 						     acrtc_state->stream,
6874 						     &bundle->stream_update,
6875 						     dc_state);
6876 
6877 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6878 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
6879 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
6880 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
6881 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6882 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
6883 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
6884 			amdgpu_dm_psr_enable(acrtc_state->stream);
6885 		}
6886 
6887 		mutex_unlock(&dm->dc_lock);
6888 	}
6889 
6890 	/*
6891 	 * Update cursor state *after* programming all the planes.
6892 	 * This avoids redundant programming in the case where we're going
6893 	 * to be disabling a single plane - those pipes are being disabled.
6894 	 */
6895 	if (acrtc_state->active_planes)
6896 		amdgpu_dm_commit_cursors(state);
6897 
6898 cleanup:
6899 	kfree(bundle);
6900 }
6901 
6902 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6903 				   struct drm_atomic_state *state)
6904 {
6905 	struct amdgpu_device *adev = dev->dev_private;
6906 	struct amdgpu_dm_connector *aconnector;
6907 	struct drm_connector *connector;
6908 	struct drm_connector_state *old_con_state, *new_con_state;
6909 	struct drm_crtc_state *new_crtc_state;
6910 	struct dm_crtc_state *new_dm_crtc_state;
6911 	const struct dc_stream_status *status;
6912 	int i, inst;
6913 
6914 	/* Notify device removals. */
6915 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6916 		if (old_con_state->crtc != new_con_state->crtc) {
6917 			/* CRTC changes require notification. */
6918 			goto notify;
6919 		}
6920 
6921 		if (!new_con_state->crtc)
6922 			continue;
6923 
6924 		new_crtc_state = drm_atomic_get_new_crtc_state(
6925 			state, new_con_state->crtc);
6926 
6927 		if (!new_crtc_state)
6928 			continue;
6929 
6930 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6931 			continue;
6932 
6933 	notify:
6934 		aconnector = to_amdgpu_dm_connector(connector);
6935 
6936 		mutex_lock(&adev->dm.audio_lock);
6937 		inst = aconnector->audio_inst;
6938 		aconnector->audio_inst = -1;
6939 		mutex_unlock(&adev->dm.audio_lock);
6940 
6941 		amdgpu_dm_audio_eld_notify(adev, inst);
6942 	}
6943 
6944 	/* Notify audio device additions. */
6945 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6946 		if (!new_con_state->crtc)
6947 			continue;
6948 
6949 		new_crtc_state = drm_atomic_get_new_crtc_state(
6950 			state, new_con_state->crtc);
6951 
6952 		if (!new_crtc_state)
6953 			continue;
6954 
6955 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6956 			continue;
6957 
6958 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6959 		if (!new_dm_crtc_state->stream)
6960 			continue;
6961 
6962 		status = dc_stream_get_status(new_dm_crtc_state->stream);
6963 		if (!status)
6964 			continue;
6965 
6966 		aconnector = to_amdgpu_dm_connector(connector);
6967 
6968 		mutex_lock(&adev->dm.audio_lock);
6969 		inst = status->audio_inst;
6970 		aconnector->audio_inst = inst;
6971 		mutex_unlock(&adev->dm.audio_lock);
6972 
6973 		amdgpu_dm_audio_eld_notify(adev, inst);
6974 	}
6975 }
6976 
6977 /*
6978  * Enable interrupts on CRTCs that are newly active, undergone
6979  * a modeset, or have active planes again.
6980  *
6981  * Done in two passes, based on the for_modeset flag:
6982  * Pass 1: For CRTCs going through modeset
6983  * Pass 2: For CRTCs going from 0 to n active planes
6984  *
6985  * Interrupts can only be enabled after the planes are programmed,
6986  * so this requires a two-pass approach since we don't want to
6987  * just defer the interrupts until after commit planes every time.
6988  */
6989 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6990 					     struct drm_atomic_state *state,
6991 					     bool for_modeset)
6992 {
6993 	struct amdgpu_device *adev = dev->dev_private;
6994 	struct drm_crtc *crtc;
6995 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6996 	int i;
6997 #ifdef CONFIG_DEBUG_FS
6998 	enum amdgpu_dm_pipe_crc_source source;
6999 #endif
7000 
7001 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7002 				      new_crtc_state, i) {
7003 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7004 		struct dm_crtc_state *dm_new_crtc_state =
7005 			to_dm_crtc_state(new_crtc_state);
7006 		struct dm_crtc_state *dm_old_crtc_state =
7007 			to_dm_crtc_state(old_crtc_state);
7008 		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
7009 		bool run_pass;
7010 
7011 		run_pass = (for_modeset && modeset) ||
7012 			   (!for_modeset && !modeset &&
7013 			    !dm_old_crtc_state->interrupts_enabled);
7014 
7015 		if (!run_pass)
7016 			continue;
7017 
7018 		if (!dm_new_crtc_state->interrupts_enabled)
7019 			continue;
7020 
7021 		manage_dm_interrupts(adev, acrtc, true);
7022 
7023 #ifdef CONFIG_DEBUG_FS
7024 		/* The stream has changed so CRC capture needs to re-enabled. */
7025 		source = dm_new_crtc_state->crc_src;
7026 		if (amdgpu_dm_is_valid_crc_source(source)) {
7027 			amdgpu_dm_crtc_configure_crc_source(
7028 				crtc, dm_new_crtc_state,
7029 				dm_new_crtc_state->crc_src);
7030 		}
7031 #endif
7032 	}
7033 }
7034 
7035 /*
7036  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7037  * @crtc_state: the DRM CRTC state
7038  * @stream_state: the DC stream state.
7039  *
7040  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7041  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7042  */
7043 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7044 						struct dc_stream_state *stream_state)
7045 {
7046 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7047 }
7048 
7049 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7050 				   struct drm_atomic_state *state,
7051 				   bool nonblock)
7052 {
7053 	struct drm_crtc *crtc;
7054 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7055 	struct amdgpu_device *adev = dev->dev_private;
7056 	int i;
7057 
7058 	/*
7059 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7060 	 * a modeset, being disabled, or have no active planes.
7061 	 *
7062 	 * It's done in atomic commit rather than commit tail for now since
7063 	 * some of these interrupt handlers access the current CRTC state and
7064 	 * potentially the stream pointer itself.
7065 	 *
7066 	 * Since the atomic state is swapped within atomic commit and not within
7067 	 * commit tail this would leave to new state (that hasn't been committed yet)
7068 	 * being accesssed from within the handlers.
7069 	 *
7070 	 * TODO: Fix this so we can do this in commit tail and not have to block
7071 	 * in atomic check.
7072 	 */
7073 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7074 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7075 		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7076 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7077 
7078 		if (dm_old_crtc_state->interrupts_enabled &&
7079 		    (!dm_new_crtc_state->interrupts_enabled ||
7080 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7081 			manage_dm_interrupts(adev, acrtc, false);
7082 	}
7083 	/*
7084 	 * Add check here for SoC's that support hardware cursor plane, to
7085 	 * unset legacy_cursor_update
7086 	 */
7087 
7088 	return drm_atomic_helper_commit(dev, state, nonblock);
7089 
7090 	/*TODO Handle EINTR, reenable IRQ*/
7091 }
7092 
7093 /**
7094  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7095  * @state: The atomic state to commit
7096  *
7097  * This will tell DC to commit the constructed DC state from atomic_check,
7098  * programming the hardware. Any failures here implies a hardware failure, since
7099  * atomic check should have filtered anything non-kosher.
7100  */
7101 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7102 {
7103 	struct drm_device *dev = state->dev;
7104 	struct amdgpu_device *adev = dev->dev_private;
7105 	struct amdgpu_display_manager *dm = &adev->dm;
7106 	struct dm_atomic_state *dm_state;
7107 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7108 	uint32_t i, j;
7109 	struct drm_crtc *crtc;
7110 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7111 	unsigned long flags;
7112 	bool wait_for_vblank = true;
7113 	struct drm_connector *connector;
7114 	struct drm_connector_state *old_con_state, *new_con_state;
7115 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7116 	int crtc_disable_count = 0;
7117 
7118 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7119 
7120 	dm_state = dm_atomic_get_new_state(state);
7121 	if (dm_state && dm_state->context) {
7122 		dc_state = dm_state->context;
7123 	} else {
7124 		/* No state changes, retain current state. */
7125 		dc_state_temp = dc_create_state(dm->dc);
7126 		ASSERT(dc_state_temp);
7127 		dc_state = dc_state_temp;
7128 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7129 	}
7130 
7131 	/* update changed items */
7132 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7133 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7134 
7135 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7136 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7137 
7138 		DRM_DEBUG_DRIVER(
7139 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7140 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7141 			"connectors_changed:%d\n",
7142 			acrtc->crtc_id,
7143 			new_crtc_state->enable,
7144 			new_crtc_state->active,
7145 			new_crtc_state->planes_changed,
7146 			new_crtc_state->mode_changed,
7147 			new_crtc_state->active_changed,
7148 			new_crtc_state->connectors_changed);
7149 
7150 		/* Copy all transient state flags into dc state */
7151 		if (dm_new_crtc_state->stream) {
7152 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7153 							    dm_new_crtc_state->stream);
7154 		}
7155 
7156 		/* handles headless hotplug case, updating new_state and
7157 		 * aconnector as needed
7158 		 */
7159 
7160 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7161 
7162 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7163 
7164 			if (!dm_new_crtc_state->stream) {
7165 				/*
7166 				 * this could happen because of issues with
7167 				 * userspace notifications delivery.
7168 				 * In this case userspace tries to set mode on
7169 				 * display which is disconnected in fact.
7170 				 * dc_sink is NULL in this case on aconnector.
7171 				 * We expect reset mode will come soon.
7172 				 *
7173 				 * This can also happen when unplug is done
7174 				 * during resume sequence ended
7175 				 *
7176 				 * In this case, we want to pretend we still
7177 				 * have a sink to keep the pipe running so that
7178 				 * hw state is consistent with the sw state
7179 				 */
7180 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7181 						__func__, acrtc->base.base.id);
7182 				continue;
7183 			}
7184 
7185 			if (dm_old_crtc_state->stream)
7186 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7187 
7188 			pm_runtime_get_noresume(dev->dev);
7189 
7190 			acrtc->enabled = true;
7191 			acrtc->hw_mode = new_crtc_state->mode;
7192 			crtc->hwmode = new_crtc_state->mode;
7193 		} else if (modereset_required(new_crtc_state)) {
7194 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7195 			/* i.e. reset mode */
7196 			if (dm_old_crtc_state->stream) {
7197 				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7198 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7199 
7200 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7201 			}
7202 		}
7203 	} /* for_each_crtc_in_state() */
7204 
7205 	if (dc_state) {
7206 		dm_enable_per_frame_crtc_master_sync(dc_state);
7207 		mutex_lock(&dm->dc_lock);
7208 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7209 		mutex_unlock(&dm->dc_lock);
7210 	}
7211 
7212 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7213 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7214 
7215 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7216 
7217 		if (dm_new_crtc_state->stream != NULL) {
7218 			const struct dc_stream_status *status =
7219 					dc_stream_get_status(dm_new_crtc_state->stream);
7220 
7221 			if (!status)
7222 				status = dc_stream_get_status_from_state(dc_state,
7223 									 dm_new_crtc_state->stream);
7224 
7225 			if (!status)
7226 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7227 			else
7228 				acrtc->otg_inst = status->primary_otg_inst;
7229 		}
7230 	}
7231 #ifdef CONFIG_DRM_AMD_DC_HDCP
7232 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7233 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7234 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7235 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7236 
7237 		new_crtc_state = NULL;
7238 
7239 		if (acrtc)
7240 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7241 
7242 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7243 
7244 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7245 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7246 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7247 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7248 			continue;
7249 		}
7250 
7251 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7252 			hdcp_update_display(
7253 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7254 				new_con_state->hdcp_content_type,
7255 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7256 													 : false);
7257 	}
7258 #endif
7259 
7260 	/* Handle connector state changes */
7261 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7262 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7263 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7264 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7265 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7266 		struct dc_stream_update stream_update;
7267 		struct dc_info_packet hdr_packet;
7268 		struct dc_stream_status *status = NULL;
7269 		bool abm_changed, hdr_changed, scaling_changed;
7270 
7271 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7272 		memset(&stream_update, 0, sizeof(stream_update));
7273 
7274 		if (acrtc) {
7275 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7276 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7277 		}
7278 
7279 		/* Skip any modesets/resets */
7280 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7281 			continue;
7282 
7283 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7284 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7285 
7286 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7287 							     dm_old_con_state);
7288 
7289 		abm_changed = dm_new_crtc_state->abm_level !=
7290 			      dm_old_crtc_state->abm_level;
7291 
7292 		hdr_changed =
7293 			is_hdr_metadata_different(old_con_state, new_con_state);
7294 
7295 		if (!scaling_changed && !abm_changed && !hdr_changed)
7296 			continue;
7297 
7298 		stream_update.stream = dm_new_crtc_state->stream;
7299 		if (scaling_changed) {
7300 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7301 					dm_new_con_state, dm_new_crtc_state->stream);
7302 
7303 			stream_update.src = dm_new_crtc_state->stream->src;
7304 			stream_update.dst = dm_new_crtc_state->stream->dst;
7305 		}
7306 
7307 		if (abm_changed) {
7308 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7309 
7310 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7311 		}
7312 
7313 		if (hdr_changed) {
7314 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7315 			stream_update.hdr_static_metadata = &hdr_packet;
7316 		}
7317 
7318 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7319 		WARN_ON(!status);
7320 		WARN_ON(!status->plane_count);
7321 
7322 		/*
7323 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7324 		 * Here we create an empty update on each plane.
7325 		 * To fix this, DC should permit updating only stream properties.
7326 		 */
7327 		for (j = 0; j < status->plane_count; j++)
7328 			dummy_updates[j].surface = status->plane_states[0];
7329 
7330 
7331 		mutex_lock(&dm->dc_lock);
7332 		dc_commit_updates_for_stream(dm->dc,
7333 						     dummy_updates,
7334 						     status->plane_count,
7335 						     dm_new_crtc_state->stream,
7336 						     &stream_update,
7337 						     dc_state);
7338 		mutex_unlock(&dm->dc_lock);
7339 	}
7340 
7341 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7342 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7343 				      new_crtc_state, i) {
7344 		if (old_crtc_state->active && !new_crtc_state->active)
7345 			crtc_disable_count++;
7346 
7347 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7348 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7349 
7350 		/* Update freesync active state. */
7351 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7352 
7353 		/* Handle vrr on->off / off->on transitions */
7354 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7355 						dm_new_crtc_state);
7356 	}
7357 
7358 	/* Enable interrupts for CRTCs going through a modeset. */
7359 	amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7360 
7361 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7362 		if (new_crtc_state->async_flip)
7363 			wait_for_vblank = false;
7364 
7365 	/* update planes when needed per crtc*/
7366 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7367 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7368 
7369 		if (dm_new_crtc_state->stream)
7370 			amdgpu_dm_commit_planes(state, dc_state, dev,
7371 						dm, crtc, wait_for_vblank);
7372 	}
7373 
7374 	/* Enable interrupts for CRTCs going from 0 to n active planes. */
7375 	amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7376 
7377 	/* Update audio instances for each connector. */
7378 	amdgpu_dm_commit_audio(dev, state);
7379 
7380 	/*
7381 	 * send vblank event on all events not handled in flip and
7382 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7383 	 */
7384 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7385 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7386 
7387 		if (new_crtc_state->event)
7388 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7389 
7390 		new_crtc_state->event = NULL;
7391 	}
7392 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7393 
7394 	/* Signal HW programming completion */
7395 	drm_atomic_helper_commit_hw_done(state);
7396 
7397 	if (wait_for_vblank)
7398 		drm_atomic_helper_wait_for_flip_done(dev, state);
7399 
7400 	drm_atomic_helper_cleanup_planes(dev, state);
7401 
7402 	/*
7403 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7404 	 * so we can put the GPU into runtime suspend if we're not driving any
7405 	 * displays anymore
7406 	 */
7407 	for (i = 0; i < crtc_disable_count; i++)
7408 		pm_runtime_put_autosuspend(dev->dev);
7409 	pm_runtime_mark_last_busy(dev->dev);
7410 
7411 	if (dc_state_temp)
7412 		dc_release_state(dc_state_temp);
7413 }
7414 
7415 
7416 static int dm_force_atomic_commit(struct drm_connector *connector)
7417 {
7418 	int ret = 0;
7419 	struct drm_device *ddev = connector->dev;
7420 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7421 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7422 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7423 	struct drm_connector_state *conn_state;
7424 	struct drm_crtc_state *crtc_state;
7425 	struct drm_plane_state *plane_state;
7426 
7427 	if (!state)
7428 		return -ENOMEM;
7429 
7430 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7431 
7432 	/* Construct an atomic state to restore previous display setting */
7433 
7434 	/*
7435 	 * Attach connectors to drm_atomic_state
7436 	 */
7437 	conn_state = drm_atomic_get_connector_state(state, connector);
7438 
7439 	ret = PTR_ERR_OR_ZERO(conn_state);
7440 	if (ret)
7441 		goto err;
7442 
7443 	/* Attach crtc to drm_atomic_state*/
7444 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7445 
7446 	ret = PTR_ERR_OR_ZERO(crtc_state);
7447 	if (ret)
7448 		goto err;
7449 
7450 	/* force a restore */
7451 	crtc_state->mode_changed = true;
7452 
7453 	/* Attach plane to drm_atomic_state */
7454 	plane_state = drm_atomic_get_plane_state(state, plane);
7455 
7456 	ret = PTR_ERR_OR_ZERO(plane_state);
7457 	if (ret)
7458 		goto err;
7459 
7460 
7461 	/* Call commit internally with the state we just constructed */
7462 	ret = drm_atomic_commit(state);
7463 	if (!ret)
7464 		return 0;
7465 
7466 err:
7467 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7468 	drm_atomic_state_put(state);
7469 
7470 	return ret;
7471 }
7472 
7473 /*
7474  * This function handles all cases when set mode does not come upon hotplug.
7475  * This includes when a display is unplugged then plugged back into the
7476  * same port and when running without usermode desktop manager supprot
7477  */
7478 void dm_restore_drm_connector_state(struct drm_device *dev,
7479 				    struct drm_connector *connector)
7480 {
7481 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7482 	struct amdgpu_crtc *disconnected_acrtc;
7483 	struct dm_crtc_state *acrtc_state;
7484 
7485 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7486 		return;
7487 
7488 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7489 	if (!disconnected_acrtc)
7490 		return;
7491 
7492 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7493 	if (!acrtc_state->stream)
7494 		return;
7495 
7496 	/*
7497 	 * If the previous sink is not released and different from the current,
7498 	 * we deduce we are in a state where we can not rely on usermode call
7499 	 * to turn on the display, so we do it here
7500 	 */
7501 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7502 		dm_force_atomic_commit(&aconnector->base);
7503 }
7504 
7505 /*
7506  * Grabs all modesetting locks to serialize against any blocking commits,
7507  * Waits for completion of all non blocking commits.
7508  */
7509 static int do_aquire_global_lock(struct drm_device *dev,
7510 				 struct drm_atomic_state *state)
7511 {
7512 	struct drm_crtc *crtc;
7513 	struct drm_crtc_commit *commit;
7514 	long ret;
7515 
7516 	/*
7517 	 * Adding all modeset locks to aquire_ctx will
7518 	 * ensure that when the framework release it the
7519 	 * extra locks we are locking here will get released to
7520 	 */
7521 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7522 	if (ret)
7523 		return ret;
7524 
7525 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7526 		spin_lock(&crtc->commit_lock);
7527 		commit = list_first_entry_or_null(&crtc->commit_list,
7528 				struct drm_crtc_commit, commit_entry);
7529 		if (commit)
7530 			drm_crtc_commit_get(commit);
7531 		spin_unlock(&crtc->commit_lock);
7532 
7533 		if (!commit)
7534 			continue;
7535 
7536 		/*
7537 		 * Make sure all pending HW programming completed and
7538 		 * page flips done
7539 		 */
7540 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7541 
7542 		if (ret > 0)
7543 			ret = wait_for_completion_interruptible_timeout(
7544 					&commit->flip_done, 10*HZ);
7545 
7546 		if (ret == 0)
7547 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7548 				  "timed out\n", crtc->base.id, crtc->name);
7549 
7550 		drm_crtc_commit_put(commit);
7551 	}
7552 
7553 	return ret < 0 ? ret : 0;
7554 }
7555 
7556 static void get_freesync_config_for_crtc(
7557 	struct dm_crtc_state *new_crtc_state,
7558 	struct dm_connector_state *new_con_state)
7559 {
7560 	struct mod_freesync_config config = {0};
7561 	struct amdgpu_dm_connector *aconnector =
7562 			to_amdgpu_dm_connector(new_con_state->base.connector);
7563 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7564 	int vrefresh = drm_mode_vrefresh(mode);
7565 
7566 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7567 					vrefresh >= aconnector->min_vfreq &&
7568 					vrefresh <= aconnector->max_vfreq;
7569 
7570 	if (new_crtc_state->vrr_supported) {
7571 		new_crtc_state->stream->ignore_msa_timing_param = true;
7572 		config.state = new_crtc_state->base.vrr_enabled ?
7573 				VRR_STATE_ACTIVE_VARIABLE :
7574 				VRR_STATE_INACTIVE;
7575 		config.min_refresh_in_uhz =
7576 				aconnector->min_vfreq * 1000000;
7577 		config.max_refresh_in_uhz =
7578 				aconnector->max_vfreq * 1000000;
7579 		config.vsif_supported = true;
7580 		config.btr = true;
7581 	}
7582 
7583 	new_crtc_state->freesync_config = config;
7584 }
7585 
7586 static void reset_freesync_config_for_crtc(
7587 	struct dm_crtc_state *new_crtc_state)
7588 {
7589 	new_crtc_state->vrr_supported = false;
7590 
7591 	memset(&new_crtc_state->vrr_params, 0,
7592 	       sizeof(new_crtc_state->vrr_params));
7593 	memset(&new_crtc_state->vrr_infopacket, 0,
7594 	       sizeof(new_crtc_state->vrr_infopacket));
7595 }
7596 
7597 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7598 				struct drm_atomic_state *state,
7599 				struct drm_crtc *crtc,
7600 				struct drm_crtc_state *old_crtc_state,
7601 				struct drm_crtc_state *new_crtc_state,
7602 				bool enable,
7603 				bool *lock_and_validation_needed)
7604 {
7605 	struct dm_atomic_state *dm_state = NULL;
7606 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7607 	struct dc_stream_state *new_stream;
7608 	int ret = 0;
7609 
7610 	/*
7611 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7612 	 * update changed items
7613 	 */
7614 	struct amdgpu_crtc *acrtc = NULL;
7615 	struct amdgpu_dm_connector *aconnector = NULL;
7616 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7617 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7618 
7619 	new_stream = NULL;
7620 
7621 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7622 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7623 	acrtc = to_amdgpu_crtc(crtc);
7624 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7625 
7626 	/* TODO This hack should go away */
7627 	if (aconnector && enable) {
7628 		/* Make sure fake sink is created in plug-in scenario */
7629 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7630 							    &aconnector->base);
7631 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7632 							    &aconnector->base);
7633 
7634 		if (IS_ERR(drm_new_conn_state)) {
7635 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7636 			goto fail;
7637 		}
7638 
7639 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7640 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7641 
7642 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7643 			goto skip_modeset;
7644 
7645 		new_stream = create_stream_for_sink(aconnector,
7646 						     &new_crtc_state->mode,
7647 						    dm_new_conn_state,
7648 						    dm_old_crtc_state->stream);
7649 
7650 		/*
7651 		 * we can have no stream on ACTION_SET if a display
7652 		 * was disconnected during S3, in this case it is not an
7653 		 * error, the OS will be updated after detection, and
7654 		 * will do the right thing on next atomic commit
7655 		 */
7656 
7657 		if (!new_stream) {
7658 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7659 					__func__, acrtc->base.base.id);
7660 			ret = -ENOMEM;
7661 			goto fail;
7662 		}
7663 
7664 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7665 
7666 		ret = fill_hdr_info_packet(drm_new_conn_state,
7667 					   &new_stream->hdr_static_metadata);
7668 		if (ret)
7669 			goto fail;
7670 
7671 		/*
7672 		 * If we already removed the old stream from the context
7673 		 * (and set the new stream to NULL) then we can't reuse
7674 		 * the old stream even if the stream and scaling are unchanged.
7675 		 * We'll hit the BUG_ON and black screen.
7676 		 *
7677 		 * TODO: Refactor this function to allow this check to work
7678 		 * in all conditions.
7679 		 */
7680 		if (dm_new_crtc_state->stream &&
7681 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7682 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7683 			new_crtc_state->mode_changed = false;
7684 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7685 					 new_crtc_state->mode_changed);
7686 		}
7687 	}
7688 
7689 	/* mode_changed flag may get updated above, need to check again */
7690 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7691 		goto skip_modeset;
7692 
7693 	DRM_DEBUG_DRIVER(
7694 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7695 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7696 		"connectors_changed:%d\n",
7697 		acrtc->crtc_id,
7698 		new_crtc_state->enable,
7699 		new_crtc_state->active,
7700 		new_crtc_state->planes_changed,
7701 		new_crtc_state->mode_changed,
7702 		new_crtc_state->active_changed,
7703 		new_crtc_state->connectors_changed);
7704 
7705 	/* Remove stream for any changed/disabled CRTC */
7706 	if (!enable) {
7707 
7708 		if (!dm_old_crtc_state->stream)
7709 			goto skip_modeset;
7710 
7711 		ret = dm_atomic_get_state(state, &dm_state);
7712 		if (ret)
7713 			goto fail;
7714 
7715 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7716 				crtc->base.id);
7717 
7718 		/* i.e. reset mode */
7719 		if (dc_remove_stream_from_ctx(
7720 				dm->dc,
7721 				dm_state->context,
7722 				dm_old_crtc_state->stream) != DC_OK) {
7723 			ret = -EINVAL;
7724 			goto fail;
7725 		}
7726 
7727 		dc_stream_release(dm_old_crtc_state->stream);
7728 		dm_new_crtc_state->stream = NULL;
7729 
7730 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7731 
7732 		*lock_and_validation_needed = true;
7733 
7734 	} else {/* Add stream for any updated/enabled CRTC */
7735 		/*
7736 		 * Quick fix to prevent NULL pointer on new_stream when
7737 		 * added MST connectors not found in existing crtc_state in the chained mode
7738 		 * TODO: need to dig out the root cause of that
7739 		 */
7740 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7741 			goto skip_modeset;
7742 
7743 		if (modereset_required(new_crtc_state))
7744 			goto skip_modeset;
7745 
7746 		if (modeset_required(new_crtc_state, new_stream,
7747 				     dm_old_crtc_state->stream)) {
7748 
7749 			WARN_ON(dm_new_crtc_state->stream);
7750 
7751 			ret = dm_atomic_get_state(state, &dm_state);
7752 			if (ret)
7753 				goto fail;
7754 
7755 			dm_new_crtc_state->stream = new_stream;
7756 
7757 			dc_stream_retain(new_stream);
7758 
7759 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7760 						crtc->base.id);
7761 
7762 			if (dc_add_stream_to_ctx(
7763 					dm->dc,
7764 					dm_state->context,
7765 					dm_new_crtc_state->stream) != DC_OK) {
7766 				ret = -EINVAL;
7767 				goto fail;
7768 			}
7769 
7770 			*lock_and_validation_needed = true;
7771 		}
7772 	}
7773 
7774 skip_modeset:
7775 	/* Release extra reference */
7776 	if (new_stream)
7777 		 dc_stream_release(new_stream);
7778 
7779 	/*
7780 	 * We want to do dc stream updates that do not require a
7781 	 * full modeset below.
7782 	 */
7783 	if (!(enable && aconnector && new_crtc_state->enable &&
7784 	      new_crtc_state->active))
7785 		return 0;
7786 	/*
7787 	 * Given above conditions, the dc state cannot be NULL because:
7788 	 * 1. We're in the process of enabling CRTCs (just been added
7789 	 *    to the dc context, or already is on the context)
7790 	 * 2. Has a valid connector attached, and
7791 	 * 3. Is currently active and enabled.
7792 	 * => The dc stream state currently exists.
7793 	 */
7794 	BUG_ON(dm_new_crtc_state->stream == NULL);
7795 
7796 	/* Scaling or underscan settings */
7797 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7798 		update_stream_scaling_settings(
7799 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7800 
7801 	/* ABM settings */
7802 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7803 
7804 	/*
7805 	 * Color management settings. We also update color properties
7806 	 * when a modeset is needed, to ensure it gets reprogrammed.
7807 	 */
7808 	if (dm_new_crtc_state->base.color_mgmt_changed ||
7809 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7810 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7811 		if (ret)
7812 			goto fail;
7813 	}
7814 
7815 	/* Update Freesync settings. */
7816 	get_freesync_config_for_crtc(dm_new_crtc_state,
7817 				     dm_new_conn_state);
7818 
7819 	return ret;
7820 
7821 fail:
7822 	if (new_stream)
7823 		dc_stream_release(new_stream);
7824 	return ret;
7825 }
7826 
7827 static bool should_reset_plane(struct drm_atomic_state *state,
7828 			       struct drm_plane *plane,
7829 			       struct drm_plane_state *old_plane_state,
7830 			       struct drm_plane_state *new_plane_state)
7831 {
7832 	struct drm_plane *other;
7833 	struct drm_plane_state *old_other_state, *new_other_state;
7834 	struct drm_crtc_state *new_crtc_state;
7835 	int i;
7836 
7837 	/*
7838 	 * TODO: Remove this hack once the checks below are sufficient
7839 	 * enough to determine when we need to reset all the planes on
7840 	 * the stream.
7841 	 */
7842 	if (state->allow_modeset)
7843 		return true;
7844 
7845 	/* Exit early if we know that we're adding or removing the plane. */
7846 	if (old_plane_state->crtc != new_plane_state->crtc)
7847 		return true;
7848 
7849 	/* old crtc == new_crtc == NULL, plane not in context. */
7850 	if (!new_plane_state->crtc)
7851 		return false;
7852 
7853 	new_crtc_state =
7854 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7855 
7856 	if (!new_crtc_state)
7857 		return true;
7858 
7859 	/* CRTC Degamma changes currently require us to recreate planes. */
7860 	if (new_crtc_state->color_mgmt_changed)
7861 		return true;
7862 
7863 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7864 		return true;
7865 
7866 	/*
7867 	 * If there are any new primary or overlay planes being added or
7868 	 * removed then the z-order can potentially change. To ensure
7869 	 * correct z-order and pipe acquisition the current DC architecture
7870 	 * requires us to remove and recreate all existing planes.
7871 	 *
7872 	 * TODO: Come up with a more elegant solution for this.
7873 	 */
7874 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7875 		if (other->type == DRM_PLANE_TYPE_CURSOR)
7876 			continue;
7877 
7878 		if (old_other_state->crtc != new_plane_state->crtc &&
7879 		    new_other_state->crtc != new_plane_state->crtc)
7880 			continue;
7881 
7882 		if (old_other_state->crtc != new_other_state->crtc)
7883 			return true;
7884 
7885 		/* TODO: Remove this once we can handle fast format changes. */
7886 		if (old_other_state->fb && new_other_state->fb &&
7887 		    old_other_state->fb->format != new_other_state->fb->format)
7888 			return true;
7889 	}
7890 
7891 	return false;
7892 }
7893 
7894 static int dm_update_plane_state(struct dc *dc,
7895 				 struct drm_atomic_state *state,
7896 				 struct drm_plane *plane,
7897 				 struct drm_plane_state *old_plane_state,
7898 				 struct drm_plane_state *new_plane_state,
7899 				 bool enable,
7900 				 bool *lock_and_validation_needed)
7901 {
7902 
7903 	struct dm_atomic_state *dm_state = NULL;
7904 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7905 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7906 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7907 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7908 	struct amdgpu_crtc *new_acrtc;
7909 	bool needs_reset;
7910 	int ret = 0;
7911 
7912 
7913 	new_plane_crtc = new_plane_state->crtc;
7914 	old_plane_crtc = old_plane_state->crtc;
7915 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
7916 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
7917 
7918 	/*TODO Implement better atomic check for cursor plane */
7919 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7920 		if (!enable || !new_plane_crtc ||
7921 			drm_atomic_plane_disabling(plane->state, new_plane_state))
7922 			return 0;
7923 
7924 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
7925 
7926 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
7927 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
7928 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
7929 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
7930 			return -EINVAL;
7931 		}
7932 
7933 		if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
7934 			new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
7935 			DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
7936 							 new_plane_state->crtc_x, new_plane_state->crtc_y);
7937 			return -EINVAL;
7938 		}
7939 
7940 		return 0;
7941 	}
7942 
7943 	needs_reset = should_reset_plane(state, plane, old_plane_state,
7944 					 new_plane_state);
7945 
7946 	/* Remove any changed/removed planes */
7947 	if (!enable) {
7948 		if (!needs_reset)
7949 			return 0;
7950 
7951 		if (!old_plane_crtc)
7952 			return 0;
7953 
7954 		old_crtc_state = drm_atomic_get_old_crtc_state(
7955 				state, old_plane_crtc);
7956 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7957 
7958 		if (!dm_old_crtc_state->stream)
7959 			return 0;
7960 
7961 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7962 				plane->base.id, old_plane_crtc->base.id);
7963 
7964 		ret = dm_atomic_get_state(state, &dm_state);
7965 		if (ret)
7966 			return ret;
7967 
7968 		if (!dc_remove_plane_from_context(
7969 				dc,
7970 				dm_old_crtc_state->stream,
7971 				dm_old_plane_state->dc_state,
7972 				dm_state->context)) {
7973 
7974 			ret = EINVAL;
7975 			return ret;
7976 		}
7977 
7978 
7979 		dc_plane_state_release(dm_old_plane_state->dc_state);
7980 		dm_new_plane_state->dc_state = NULL;
7981 
7982 		*lock_and_validation_needed = true;
7983 
7984 	} else { /* Add new planes */
7985 		struct dc_plane_state *dc_new_plane_state;
7986 
7987 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7988 			return 0;
7989 
7990 		if (!new_plane_crtc)
7991 			return 0;
7992 
7993 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7994 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7995 
7996 		if (!dm_new_crtc_state->stream)
7997 			return 0;
7998 
7999 		if (!needs_reset)
8000 			return 0;
8001 
8002 		WARN_ON(dm_new_plane_state->dc_state);
8003 
8004 		dc_new_plane_state = dc_create_plane_state(dc);
8005 		if (!dc_new_plane_state)
8006 			return -ENOMEM;
8007 
8008 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8009 				plane->base.id, new_plane_crtc->base.id);
8010 
8011 		ret = fill_dc_plane_attributes(
8012 			new_plane_crtc->dev->dev_private,
8013 			dc_new_plane_state,
8014 			new_plane_state,
8015 			new_crtc_state);
8016 		if (ret) {
8017 			dc_plane_state_release(dc_new_plane_state);
8018 			return ret;
8019 		}
8020 
8021 		ret = dm_atomic_get_state(state, &dm_state);
8022 		if (ret) {
8023 			dc_plane_state_release(dc_new_plane_state);
8024 			return ret;
8025 		}
8026 
8027 		/*
8028 		 * Any atomic check errors that occur after this will
8029 		 * not need a release. The plane state will be attached
8030 		 * to the stream, and therefore part of the atomic
8031 		 * state. It'll be released when the atomic state is
8032 		 * cleaned.
8033 		 */
8034 		if (!dc_add_plane_to_context(
8035 				dc,
8036 				dm_new_crtc_state->stream,
8037 				dc_new_plane_state,
8038 				dm_state->context)) {
8039 
8040 			dc_plane_state_release(dc_new_plane_state);
8041 			return -EINVAL;
8042 		}
8043 
8044 		dm_new_plane_state->dc_state = dc_new_plane_state;
8045 
8046 		/* Tell DC to do a full surface update every time there
8047 		 * is a plane change. Inefficient, but works for now.
8048 		 */
8049 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8050 
8051 		*lock_and_validation_needed = true;
8052 	}
8053 
8054 
8055 	return ret;
8056 }
8057 
/*
 * Determine the coarse DC update type (FAST/MED -> kept, otherwise FULL)
 * for this commit by building per-stream surface/stream update bundles
 * and asking DC to classify them via dc_check_update_surfaces_for_stream().
 *
 * Returns 0 on success with the result in *out_type, or a negative errno.
 * On allocation or state-lookup failure, *out_type is forced to
 * UPDATE_TYPE_FULL so the caller takes the safe, fully-validated path.
 */
static int
dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
				    struct drm_atomic_state *state,
				    enum surface_update_type *out_type)
{
	struct dc *dc = dm->dc;
	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
	int i, j, num_plane, ret = 0;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
	struct drm_crtc *new_plane_crtc;
	struct drm_plane *plane;

	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
	struct dc_stream_status *status = NULL;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	/* Heap-allocated: MAX_SURFACES copies of these structs would be
	 * far too large for the kernel stack. */
	struct surface_info_bundle {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		DRM_ERROR("Failed to allocate update bundle\n");
		/* Set type to FULL to avoid crashing in DC*/
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {

		/* The bundle is reused across CRTCs; clear stale entries. */
		memset(bundle, 0, sizeof(struct surface_info_bundle));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		/* A stream being added, removed or replaced is always FULL. */
		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
			uint64_t tiling_flags;
			bool tmz_surface = false;

			new_plane_crtc = new_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			/* Cursor planes never contribute to the update type. */
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			/* A recreated dc_plane_state means the plane was reset. */
			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			bundle->surface_updates[num_plane].surface =
					new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
				bundle->stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				bundle->surface_updates[num_plane].gamma =
						new_dm_plane_state->dc_state->gamma_correction;
				bundle->surface_updates[num_plane].in_transfer_func =
						new_dm_plane_state->dc_state->in_transfer_func;
				bundle->surface_updates[num_plane].gamut_remap_matrix =
						&new_dm_plane_state->dc_state->gamut_remap_matrix;
				bundle->stream_update.gamut_remap =
						&new_dm_crtc_state->stream->gamut_remap_matrix;
				bundle->stream_update.output_csc_transform =
						&new_dm_crtc_state->stream->csc_color_matrix;
				bundle->stream_update.out_transfer_func =
						new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   scaling_info);
			if (ret)
				goto cleanup;

			bundle->surface_updates[num_plane].scaling_info = scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
				if (ret)
					goto cleanup;

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					plane_info,
					&flip_addr->address, tmz_surface,
					false);
				if (ret)
					goto cleanup;

				bundle->surface_updates[num_plane].plane_info = plane_info;
				bundle->surface_updates[num_plane].flip_addr = flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		/* Classify against the status in the *old* (current) context. */
		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		bundle->stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(
				dc,	bundle->surface_updates, num_plane,
				&bundle->stream_update, status);
		mutex_unlock(&dm->dc_lock);

		/* Anything above MED is escalated to FULL; no need to keep
		 * scanning the remaining CRTCs. */
		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(bundle);

	*out_type = update_type;
	return ret;
}
8221 
8222 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8223 {
8224 	struct drm_connector *connector;
8225 	struct drm_connector_state *conn_state;
8226 	struct amdgpu_dm_connector *aconnector = NULL;
8227 	int i;
8228 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8229 		if (conn_state->crtc != crtc)
8230 			continue;
8231 
8232 		aconnector = to_amdgpu_dm_connector(connector);
8233 		if (!aconnector->port || !aconnector->mst_port)
8234 			aconnector = NULL;
8235 		else
8236 			break;
8237 	}
8238 
8239 	if (!aconnector)
8240 		return 0;
8241 
8242 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8243 }
8244 
8245 /**
8246  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8247  * @dev: The DRM device
8248  * @state: The atomic state to commit
8249  *
8250  * Validate that the given atomic state is programmable by DC into hardware.
8251  * This involves constructing a &struct dc_state reflecting the new hardware
8252  * state we wish to commit, then querying DC to see if it is programmable. It's
8253  * important not to modify the existing DC state. Otherwise, atomic_check
8254  * may unexpectedly commit hardware changes.
8255  *
8256  * When validating the DC state, it's important that the right locks are
8257  * acquired. For full updates case which removes/adds/updates streams on one
8258  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8259  * that any such full update commit will wait for completion of any outstanding
8260  * flip using DRMs synchronization events. See
8261  * dm_determine_update_type_for_commit()
8262  *
8263  * Note that DM adds the affected connectors for all CRTCs in state, when that
8264  * might not seem necessary. This is because DC stream creation requires the
8265  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8266  * be possible but non-trivial - a possible TODO item.
8267  *
8268  * Return: -Error code if validation failed.
8269  */
8270 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8271 				  struct drm_atomic_state *state)
8272 {
8273 	struct amdgpu_device *adev = dev->dev_private;
8274 	struct dm_atomic_state *dm_state = NULL;
8275 	struct dc *dc = adev->dm.dc;
8276 	struct drm_connector *connector;
8277 	struct drm_connector_state *old_con_state, *new_con_state;
8278 	struct drm_crtc *crtc;
8279 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8280 	struct drm_plane *plane;
8281 	struct drm_plane_state *old_plane_state, *new_plane_state;
8282 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8283 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8284 
8285 	int ret, i;
8286 
8287 	/*
8288 	 * This bool will be set for true for any modeset/reset
8289 	 * or plane update which implies non fast surface update.
8290 	 */
8291 	bool lock_and_validation_needed = false;
8292 
8293 	ret = drm_atomic_helper_check_modeset(dev, state);
8294 	if (ret)
8295 		goto fail;
8296 
8297 	if (adev->asic_type >= CHIP_NAVI10) {
8298 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8299 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8300 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8301 				if (ret)
8302 					goto fail;
8303 			}
8304 		}
8305 	}
8306 
8307 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8308 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8309 		    !new_crtc_state->color_mgmt_changed &&
8310 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8311 			continue;
8312 
8313 		if (!new_crtc_state->enable)
8314 			continue;
8315 
8316 		ret = drm_atomic_add_affected_connectors(state, crtc);
8317 		if (ret)
8318 			return ret;
8319 
8320 		ret = drm_atomic_add_affected_planes(state, crtc);
8321 		if (ret)
8322 			goto fail;
8323 	}
8324 
8325 	/*
8326 	 * Add all primary and overlay planes on the CRTC to the state
8327 	 * whenever a plane is enabled to maintain correct z-ordering
8328 	 * and to enable fast surface updates.
8329 	 */
8330 	drm_for_each_crtc(crtc, dev) {
8331 		bool modified = false;
8332 
8333 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8334 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8335 				continue;
8336 
8337 			if (new_plane_state->crtc == crtc ||
8338 			    old_plane_state->crtc == crtc) {
8339 				modified = true;
8340 				break;
8341 			}
8342 		}
8343 
8344 		if (!modified)
8345 			continue;
8346 
8347 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8348 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8349 				continue;
8350 
8351 			new_plane_state =
8352 				drm_atomic_get_plane_state(state, plane);
8353 
8354 			if (IS_ERR(new_plane_state)) {
8355 				ret = PTR_ERR(new_plane_state);
8356 				goto fail;
8357 			}
8358 		}
8359 	}
8360 
8361 	/* Remove exiting planes if they are modified */
8362 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8363 		ret = dm_update_plane_state(dc, state, plane,
8364 					    old_plane_state,
8365 					    new_plane_state,
8366 					    false,
8367 					    &lock_and_validation_needed);
8368 		if (ret)
8369 			goto fail;
8370 	}
8371 
8372 	/* Disable all crtcs which require disable */
8373 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8374 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8375 					   old_crtc_state,
8376 					   new_crtc_state,
8377 					   false,
8378 					   &lock_and_validation_needed);
8379 		if (ret)
8380 			goto fail;
8381 	}
8382 
8383 	/* Enable all crtcs which require enable */
8384 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8385 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8386 					   old_crtc_state,
8387 					   new_crtc_state,
8388 					   true,
8389 					   &lock_and_validation_needed);
8390 		if (ret)
8391 			goto fail;
8392 	}
8393 
8394 	/* Add new/modified planes */
8395 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8396 		ret = dm_update_plane_state(dc, state, plane,
8397 					    old_plane_state,
8398 					    new_plane_state,
8399 					    true,
8400 					    &lock_and_validation_needed);
8401 		if (ret)
8402 			goto fail;
8403 	}
8404 
8405 	/* Run this here since we want to validate the streams we created */
8406 	ret = drm_atomic_helper_check_planes(dev, state);
8407 	if (ret)
8408 		goto fail;
8409 
8410 	if (state->legacy_cursor_update) {
8411 		/*
8412 		 * This is a fast cursor update coming from the plane update
8413 		 * helper, check if it can be done asynchronously for better
8414 		 * performance.
8415 		 */
8416 		state->async_update =
8417 			!drm_atomic_helper_async_check(dev, state);
8418 
8419 		/*
8420 		 * Skip the remaining global validation if this is an async
8421 		 * update. Cursor updates can be done without affecting
8422 		 * state or bandwidth calcs and this avoids the performance
8423 		 * penalty of locking the private state object and
8424 		 * allocating a new dc_state.
8425 		 */
8426 		if (state->async_update)
8427 			return 0;
8428 	}
8429 
8430 	/* Check scaling and underscan changes*/
8431 	/* TODO Removed scaling changes validation due to inability to commit
8432 	 * new stream into context w\o causing full reset. Need to
8433 	 * decide how to handle.
8434 	 */
8435 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8436 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8437 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8438 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8439 
8440 		/* Skip any modesets/resets */
8441 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8442 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8443 			continue;
8444 
8445 		/* Skip any thing not scale or underscan changes */
8446 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8447 			continue;
8448 
8449 		overall_update_type = UPDATE_TYPE_FULL;
8450 		lock_and_validation_needed = true;
8451 	}
8452 
8453 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8454 	if (ret)
8455 		goto fail;
8456 
8457 	if (overall_update_type < update_type)
8458 		overall_update_type = update_type;
8459 
8460 	/*
8461 	 * lock_and_validation_needed was an old way to determine if we need to set
8462 	 * the global lock. Leaving it in to check if we broke any corner cases
8463 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8464 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8465 	 */
8466 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8467 		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8468 
8469 	if (overall_update_type > UPDATE_TYPE_FAST) {
8470 		ret = dm_atomic_get_state(state, &dm_state);
8471 		if (ret)
8472 			goto fail;
8473 
8474 		ret = do_aquire_global_lock(dev, state);
8475 		if (ret)
8476 			goto fail;
8477 
8478 #if defined(CONFIG_DRM_AMD_DC_DCN)
8479 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8480 			goto fail;
8481 
8482 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8483 		if (ret)
8484 			goto fail;
8485 #endif
8486 
8487 		/*
8488 		 * Perform validation of MST topology in the state:
8489 		 * We need to perform MST atomic check before calling
8490 		 * dc_validate_global_state(), or there is a chance
8491 		 * to get stuck in an infinite loop and hang eventually.
8492 		 */
8493 		ret = drm_dp_mst_atomic_check(state);
8494 		if (ret)
8495 			goto fail;
8496 
8497 		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8498 			ret = -EINVAL;
8499 			goto fail;
8500 		}
8501 	} else {
8502 		/*
8503 		 * The commit is a fast update. Fast updates shouldn't change
8504 		 * the DC context, affect global validation, and can have their
8505 		 * commit work done in parallel with other commits not touching
8506 		 * the same resource. If we have a new DC context as part of
8507 		 * the DM atomic state from validation we need to free it and
8508 		 * retain the existing one instead.
8509 		 */
8510 		struct dm_atomic_state *new_dm_state, *old_dm_state;
8511 
8512 		new_dm_state = dm_atomic_get_new_state(state);
8513 		old_dm_state = dm_atomic_get_old_state(state);
8514 
8515 		if (new_dm_state && old_dm_state) {
8516 			if (new_dm_state->context)
8517 				dc_release_state(new_dm_state->context);
8518 
8519 			new_dm_state->context = old_dm_state->context;
8520 
8521 			if (old_dm_state->context)
8522 				dc_retain_state(old_dm_state->context);
8523 		}
8524 	}
8525 
8526 	/* Store the overall update type for use later in atomic check. */
8527 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8528 		struct dm_crtc_state *dm_new_crtc_state =
8529 			to_dm_crtc_state(new_crtc_state);
8530 
8531 		dm_new_crtc_state->update_type = (int)overall_update_type;
8532 	}
8533 
8534 	/* Must be success */
8535 	WARN_ON(ret);
8536 	return ret;
8537 
8538 fail:
8539 	if (ret == -EDEADLK)
8540 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8541 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8542 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8543 	else
8544 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8545 
8546 	return ret;
8547 }
8548 
8549 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8550 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8551 {
8552 	uint8_t dpcd_data;
8553 	bool capable = false;
8554 
8555 	if (amdgpu_dm_connector->dc_link &&
8556 		dm_helpers_dp_read_dpcd(
8557 				NULL,
8558 				amdgpu_dm_connector->dc_link,
8559 				DP_DOWN_STREAM_PORT_COUNT,
8560 				&dpcd_data,
8561 				sizeof(dpcd_data))) {
8562 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8563 	}
8564 
8565 	return capable;
8566 }
8567 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8568 					struct edid *edid)
8569 {
8570 	int i;
8571 	bool edid_check_required;
8572 	struct detailed_timing *timing;
8573 	struct detailed_non_pixel *data;
8574 	struct detailed_data_monitor_range *range;
8575 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8576 			to_amdgpu_dm_connector(connector);
8577 	struct dm_connector_state *dm_con_state = NULL;
8578 
8579 	struct drm_device *dev = connector->dev;
8580 	struct amdgpu_device *adev = dev->dev_private;
8581 	bool freesync_capable = false;
8582 
8583 	if (!connector->state) {
8584 		DRM_ERROR("%s - Connector has no state", __func__);
8585 		goto update;
8586 	}
8587 
8588 	if (!edid) {
8589 		dm_con_state = to_dm_connector_state(connector->state);
8590 
8591 		amdgpu_dm_connector->min_vfreq = 0;
8592 		amdgpu_dm_connector->max_vfreq = 0;
8593 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8594 
8595 		goto update;
8596 	}
8597 
8598 	dm_con_state = to_dm_connector_state(connector->state);
8599 
8600 	edid_check_required = false;
8601 	if (!amdgpu_dm_connector->dc_sink) {
8602 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8603 		goto update;
8604 	}
8605 	if (!adev->dm.freesync_module)
8606 		goto update;
8607 	/*
8608 	 * if edid non zero restrict freesync only for dp and edp
8609 	 */
8610 	if (edid) {
8611 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8612 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8613 			edid_check_required = is_dp_capable_without_timing_msa(
8614 						adev->dm.dc,
8615 						amdgpu_dm_connector);
8616 		}
8617 	}
8618 	if (edid_check_required == true && (edid->version > 1 ||
8619 	   (edid->version == 1 && edid->revision > 1))) {
8620 		for (i = 0; i < 4; i++) {
8621 
8622 			timing	= &edid->detailed_timings[i];
8623 			data	= &timing->data.other_data;
8624 			range	= &data->data.range;
8625 			/*
8626 			 * Check if monitor has continuous frequency mode
8627 			 */
8628 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8629 				continue;
8630 			/*
8631 			 * Check for flag range limits only. If flag == 1 then
8632 			 * no additional timing information provided.
8633 			 * Default GTF, GTF Secondary curve and CVT are not
8634 			 * supported
8635 			 */
8636 			if (range->flags != 1)
8637 				continue;
8638 
8639 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8640 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8641 			amdgpu_dm_connector->pixel_clock_mhz =
8642 				range->pixel_clock_mhz * 10;
8643 			break;
8644 		}
8645 
8646 		if (amdgpu_dm_connector->max_vfreq -
8647 		    amdgpu_dm_connector->min_vfreq > 10) {
8648 
8649 			freesync_capable = true;
8650 		}
8651 	}
8652 
8653 update:
8654 	if (dm_con_state)
8655 		dm_con_state->freesync_capable = freesync_capable;
8656 
8657 	if (connector->vrr_capable_property)
8658 		drm_connector_set_vrr_capable_property(connector,
8659 						       freesync_capable);
8660 }
8661 
8662 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8663 {
8664 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8665 
8666 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8667 		return;
8668 	if (link->type == dc_connection_none)
8669 		return;
8670 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8671 					dpcd_data, sizeof(dpcd_data))) {
8672 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8673 
8674 		if (dpcd_data[0] == 0) {
8675 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8676 			link->psr_settings.psr_feature_enabled = false;
8677 		} else {
8678 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
8679 			link->psr_settings.psr_feature_enabled = true;
8680 		}
8681 
8682 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8683 	}
8684 }
8685 
8686 /*
8687  * amdgpu_dm_link_setup_psr() - configure psr link
8688  * @stream: stream state
8689  *
8690  * Return: true if success
8691  */
8692 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8693 {
8694 	struct dc_link *link = NULL;
8695 	struct psr_config psr_config = {0};
8696 	struct psr_context psr_context = {0};
8697 	bool ret = false;
8698 
8699 	if (stream == NULL)
8700 		return false;
8701 
8702 	link = stream->link;
8703 
8704 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8705 
8706 	if (psr_config.psr_version > 0) {
8707 		psr_config.psr_exit_link_training_required = 0x1;
8708 		psr_config.psr_frame_capture_indication_req = 0;
8709 		psr_config.psr_rfb_setup_time = 0x37;
8710 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8711 		psr_config.allow_smu_optimizations = 0x0;
8712 
8713 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8714 
8715 	}
8716 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
8717 
8718 	return ret;
8719 }
8720 
8721 /*
8722  * amdgpu_dm_psr_enable() - enable psr f/w
8723  * @stream: stream state
8724  *
8725  * Return: true if success
8726  */
8727 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8728 {
8729 	struct dc_link *link = stream->link;
8730 	unsigned int vsync_rate_hz = 0;
8731 	struct dc_static_screen_params params = {0};
8732 	/* Calculate number of static frames before generating interrupt to
8733 	 * enter PSR.
8734 	 */
8735 	// Init fail safe of 2 frames static
8736 	unsigned int num_frames_static = 2;
8737 
8738 	DRM_DEBUG_DRIVER("Enabling psr...\n");
8739 
8740 	vsync_rate_hz = div64_u64(div64_u64((
8741 			stream->timing.pix_clk_100hz * 100),
8742 			stream->timing.v_total),
8743 			stream->timing.h_total);
8744 
8745 	/* Round up
8746 	 * Calculate number of frames such that at least 30 ms of time has
8747 	 * passed.
8748 	 */
8749 	if (vsync_rate_hz != 0) {
8750 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8751 		num_frames_static = (30000 / frame_time_microsec) + 1;
8752 	}
8753 
8754 	params.triggers.cursor_update = true;
8755 	params.triggers.overlay_update = true;
8756 	params.triggers.surface_update = true;
8757 	params.num_frames = num_frames_static;
8758 
8759 	dc_stream_set_static_screen_params(link->ctx->dc,
8760 					   &stream, 1,
8761 					   &params);
8762 
8763 	return dc_link_set_psr_allow_active(link, true, false);
8764 }
8765 
8766 /*
8767  * amdgpu_dm_psr_disable() - disable psr f/w
8768  * @stream:  stream state
8769  *
8770  * Return: true if success
8771  */
8772 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8773 {
8774 
8775 	DRM_DEBUG_DRIVER("Disabling psr...\n");
8776 
8777 	return dc_link_set_psr_allow_active(stream->link, false, true);
8778 }
8779