xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 3da3cc1b5f47115b16b5ffeeb4bf09ec331b0164)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

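/*
 * Rough sketch of the DMCUB firmware image layout as parsed by
 * dm_dmub_hw_init() and dm_dmub_sw_init() below (offsets relative to
 * hdr->header.ucode_array_offset_bytes):
 *
 *   | PSP header (0x100) | inst/const RAM | PSP footer (0x100) | BSS/data |
 *   |<------------------ inst_const_bytes ------------------->|
 *
 * fw_inst_const therefore starts PSP_HEADER_BYTES past the ucode array
 * offset, its usable size excludes both header and footer, and fw_bss_data
 * begins inst_const_bytes past the ucode array offset.
 */
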
/**
 * DOC: overview
 *
 * The AMD GPU display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display-related structures, based on the information
 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
 * drm_encoder, and drm_mode_config.
 *
 * Returns 0 on success.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* Removes and deallocates the DRM structures created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: [in] desired amdgpu device
 * @crtc: [in] index of the CRTC to get the counter from
 *
 * Return: the counter of vertical blanks for the given CRTC
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO: rework the base driver to use these values directly;
	 * for now, parse them back into reg-format.
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}
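
/*
 * Illustrative only (nothing in the driver calls this): how the reg-format
 * words packed by dm_crtc_get_scanoutpos() above decode back into their
 * fields. The helper name and signature are hypothetical.
 */
static inline void dm_example_unpack_scanoutpos(u32 position, u32 vbl,
						u32 *hpos, u32 *vpos,
						u32 *vbl_start, u32 *vbl_end)
{
	*vpos = position & 0xffff;	/* low word: vertical position */
	*hpos = position >> 16;		/* high word: horizontal position */
	*vbl_start = vbl & 0xffff;	/* low word: first vblank line */
	*vbl_end = vbl >> 16;		/* high word: last vblank line */
}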

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* The IRQ can occur during the initial bring-up stage. */
	/* TODO: work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* Page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to the correct count and vblank timestamp if racing with
		 * the vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with the proper
		 * count and timestamp of the vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: the vblank count and
		 * timestamp for the pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from the late vblank
		 * irq handler after the start of the back-porch (vline 0). We queue
		 * the pageflip event for send-out by drm_crtc_handle_vblank() with
		 * an updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in the front-porch!
		 */

		/* sequence will be replaced by the real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of the vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int)!e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* In VRR mode, core vblank handling is done here after the end
		 * of the front-porch, since vblank timestamping only gives
		 * valid results once scanout is past the front-porch. This
		 * also delivers any page-flip completion events that were
		 * queued to us if a pageflip happened inside the front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at the start of the front-porch is only
	 * possible in non-VRR mode, as only there does vblank timestamping
	 * give valid results while still in the front-porch. Otherwise defer
	 * it to dm_vupdate_high_irq() after the end of the front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * The following must happen at the start of vblank, for crc
	 * computation and below-the-range BTR support in VRR mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
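
/*
 * Sizing example for amdgpu_dm_fbc_init() above, using a hypothetical eDP
 * panel whose largest mode is a standard 1080p timing (htotal = 2200,
 * vtotal = 1125): max_size = 2200 * 1125 = 2475000, so the compressor BO
 * is 2475000 * 4 bytes, roughly 9.4 MiB of GTT.
 */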

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw() loads the fw_inst_const part of the
	 * DMUB firmware into CW0; otherwise, the firmware backdoor load is
	 * done here in dm_dmub_hw_init().
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* Backdoor load firmware and trigger dmub running. */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that prevents it from using VRAM that
		 * lies above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
		 * raise the system aperture high address (add 1) to get rid
		 * of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
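
/*
 * Granularity notes for mmhub_read_system_context() above: the system
 * aperture bounds are programmed in 256 KiB units (hence the >> 18 and
 * << 18 round-trip), the AGP window in 16 MiB units (>> 24), and the
 * GART page-table addresses in 4 KiB pages (>> 12, with address bits
 * 44..47 captured by the >> 44 high-part extraction).
 */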

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		/* Call the DC init_memory func */
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO: use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	/* Guard against dc being NULL when called from the error path */
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

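	/*
	 * The DMCU image carries two separately PSP-loaded regions: the ERAM
	 * payload (ucode_size_bytes minus the interrupt vectors) and the
	 * interrupt vector table (intv_size_bytes). Both are registered
	 * below against the same firmware blob.
	 */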
	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

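	/*
	 * Build an identity (linear) backlight transfer curve: 16 evenly
	 * spaced points, where 0xFFFF * i / 15 yields 0x0000, 0x1111,
	 * 0x2222, ..., 0xFFFF.
	 */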
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Minimum backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* When ABM is implemented in DMCUB (ABM 2.4 and up), the dmcu
	 * object will be NULL.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's DC implementation.
	 * For Navi1x, the clock settings of the DCN watermarks are fixed and
	 * should be passed to smu during boot up and resume from s3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create (dcn20_resource_construct) and
	 * then calls the pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

1687 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1688 				 struct dc_state *state, bool enable)
1689 {
1690 	enum dc_irq_source irq_source;
1691 	struct amdgpu_crtc *acrtc;
1692 	int rc = -EBUSY;
1693 	int i = 0;
1694 
1695 	for (i = 0; i < state->stream_count; i++) {
1696 		acrtc = get_crtc_by_otg_inst(
1697 				adev, state->stream_status[i].primary_otg_inst);
1698 
1699 		if (acrtc && state->stream_status[i].plane_count != 0) {
1700 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1701 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1702 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1703 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1704 			if (rc)
1705 				DRM_WARN("Failed to %s pflip interrupts\n",
1706 					 enable ? "enable" : "disable");
1707 
1708 			if (enable) {
1709 				rc = dm_enable_vblank(&acrtc->base);
1710 				if (rc)
1711 					DRM_WARN("Failed to enable vblank interrupts\n");
1712 			} else {
1713 				dm_disable_vblank(&acrtc->base);
1714 			}
1715 
1716 		}
1717 	}
1718 
1719 }
1720 
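/*
 * Build a copy of the current DC state with every stream (and its planes)
 * removed, then commit it so the hardware ends up driving zero streams.
 * Used on the suspend side of a GPU reset so that the cached state can be
 * restored afterwards.
 */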
1721 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1722 {
1723 	struct dc_state *context = NULL;
1724 	enum dc_status res = DC_ERROR_UNEXPECTED;
1725 	int i;
1726 	struct dc_stream_state *del_streams[MAX_PIPES];
1727 	int del_streams_count = 0;
1728 
1729 	memset(del_streams, 0, sizeof(del_streams));
1730 
1731 	context = dc_create_state(dc);
1732 	if (context == NULL)
1733 		goto context_alloc_fail;
1734 
1735 	dc_resource_state_copy_construct_current(dc, context);
1736 
1737 	/* First remove from context all streams */
1738 	for (i = 0; i < context->stream_count; i++) {
1739 		struct dc_stream_state *stream = context->streams[i];
1740 
1741 		del_streams[del_streams_count++] = stream;
1742 	}
1743 
1744 	/* Remove all planes for removed streams and then remove the streams */
1745 	for (i = 0; i < del_streams_count; i++) {
1746 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1747 			res = DC_FAIL_DETACH_SURFACES;
1748 			goto fail;
1749 		}
1750 
1751 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1752 		if (res != DC_OK)
1753 			goto fail;
1754 	}
1755 
1757 	res = dc_validate_global_state(dc, context, false);
1758 
1759 	if (res != DC_OK) {
1760 		DRM_ERROR("%s: resource validation failed, dc_status:%d\n", __func__, res);
1761 		goto fail;
1762 	}
1763 
1764 	res = dc_commit_state(dc, context);
1765 
1766 fail:
1767 	dc_release_state(context);
1768 
1769 context_alloc_fail:
1770 	return res;
1771 }
1772 
1773 static int dm_suspend(void *handle)
1774 {
1775 	struct amdgpu_device *adev = handle;
1776 	struct amdgpu_display_manager *dm = &adev->dm;
1777 	int ret = 0;
1778 
1779 	if (amdgpu_in_reset(adev)) {
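		/*
		 * Note: dc_lock is intentionally left held here; the matching
		 * unlock happens in the GPU-reset path of dm_resume().
		 */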
1780 		mutex_lock(&dm->dc_lock);
1781 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1782 
1783 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1784 
1785 		amdgpu_dm_commit_zero_streams(dm->dc);
1786 
1787 		amdgpu_dm_irq_suspend(adev);
1788 
1789 		return ret;
1790 	}
1791 
1792 	WARN_ON(adev->dm.cached_state);
1793 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1794 
1795 	s3_handle_mst(adev_to_drm(adev), true);
1796 
1797 	amdgpu_dm_irq_suspend(adev);
1798 
1800 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1801 
1802 	return 0;
1803 }
1804 
1805 static struct amdgpu_dm_connector *
1806 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1807 					     struct drm_crtc *crtc)
1808 {
1809 	uint32_t i;
1810 	struct drm_connector_state *new_con_state;
1811 	struct drm_connector *connector;
1812 	struct drm_crtc *crtc_from_state;
1813 
1814 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1815 		crtc_from_state = new_con_state->crtc;
1816 
1817 		if (crtc_from_state == crtc)
1818 			return to_amdgpu_dm_connector(connector);
1819 	}
1820 
1821 	return NULL;
1822 }
1823 
1824 static void emulated_link_detect(struct dc_link *link)
1825 {
1826 	struct dc_sink_init_data sink_init_data = { 0 };
1827 	struct display_sink_capability sink_caps = { 0 };
1828 	enum dc_edid_status edid_status;
1829 	struct dc_context *dc_ctx = link->ctx;
1830 	struct dc_sink *sink = NULL;
1831 	struct dc_sink *prev_sink = NULL;
1832 
1833 	link->type = dc_connection_none;
1834 	prev_sink = link->local_sink;
1835 
1836 	if (prev_sink)
1837 		dc_sink_release(prev_sink);
1838 
1839 	switch (link->connector_signal) {
1840 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1841 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1842 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1843 		break;
1844 	}
1845 
1846 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1847 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1848 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1849 		break;
1850 	}
1851 
1852 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1853 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1854 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1855 		break;
1856 	}
1857 
1858 	case SIGNAL_TYPE_LVDS: {
1859 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1860 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1861 		break;
1862 	}
1863 
1864 	case SIGNAL_TYPE_EDP: {
1865 		sink_caps.transaction_type =
1866 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1867 		sink_caps.signal = SIGNAL_TYPE_EDP;
1868 		break;
1869 	}
1870 
1871 	case SIGNAL_TYPE_DISPLAY_PORT: {
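		/*
		 * With no physical sink to query, the emulated DP sink is
		 * reported with a virtual signal type.
		 */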
1872 		sink_caps.transaction_type =
1873 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1874 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1875 		break;
1876 	}
1877 
1878 	default:
1879 		DC_ERROR("Invalid connector type! signal:%d\n",
1880 			link->connector_signal);
1881 		return;
1882 	}
1883 
1884 	sink_init_data.link = link;
1885 	sink_init_data.sink_signal = sink_caps.signal;
1886 
1887 	sink = dc_sink_create(&sink_init_data);
1888 	if (!sink) {
1889 		DC_ERROR("Failed to create sink!\n");
1890 		return;
1891 	}
1892 
1893 	/* dc_sink_create returns a new reference */
1894 	link->local_sink = sink;
1895 
1896 	edid_status = dm_helpers_read_local_edid(
1897 			link->ctx,
1898 			link,
1899 			sink);
1900 
1901 	if (edid_status != EDID_OK)
1902 		DC_ERROR("Failed to read EDID\n");
1903 
1904 }
1905 
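/*
 * Re-commit every stream in @dc_state with all of its surfaces flagged for a
 * full update, forcing the hardware to be fully reprogrammed after a GPU
 * reset.
 */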
1906 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1907 				     struct amdgpu_display_manager *dm)
1908 {
1909 	struct {
1910 		struct dc_surface_update surface_updates[MAX_SURFACES];
1911 		struct dc_plane_info plane_infos[MAX_SURFACES];
1912 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1913 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1914 		struct dc_stream_update stream_update;
1915 	} *bundle;
1916 	int k, m;
1917 
1918 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1919 
1920 	if (!bundle) {
1921 		dm_error("Failed to allocate update bundle\n");
1922 		goto cleanup;
1923 	}
1924 
1925 	for (k = 0; k < dc_state->stream_count; k++) {
1926 		bundle->stream_update.stream = dc_state->streams[k];
1927 
1928 		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1929 			bundle->surface_updates[m].surface =
1930 				dc_state->stream_status[k].plane_states[m];
1931 			bundle->surface_updates[m].surface->force_full_update =
1932 				true;
1933 		}
1934 		dc_commit_updates_for_stream(
1935 			dm->dc, bundle->surface_updates,
1936 			dc_state->stream_status[k].plane_count,
1937 			dc_state->streams[k], &bundle->stream_update);
1938 	}
1939 
1940 cleanup:
1941 	kfree(bundle);
1942 
1943 	return;
1944 }
1945 
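/*
 * Power off the stream on @link by committing a dpms_off stream update,
 * without otherwise tearing the stream down.
 */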
1946 static void dm_set_dpms_off(struct dc_link *link)
1947 {
1948 	struct dc_stream_state *stream_state;
1949 	struct amdgpu_dm_connector *aconnector = link->priv;
1950 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1951 	struct dc_stream_update stream_update;
1952 	bool dpms_off = true;
1953 
1954 	memset(&stream_update, 0, sizeof(stream_update));
1955 	stream_update.dpms_off = &dpms_off;
1956 
1957 	mutex_lock(&adev->dm.dc_lock);
1958 	stream_state = dc_stream_find_from_link(link);
1959 
1960 	if (stream_state == NULL) {
1961 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1962 		mutex_unlock(&adev->dm.dc_lock);
1963 		return;
1964 	}
1965 
1966 	stream_update.stream = stream_state;
1967 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1968 				     stream_state, &stream_update);
1969 	mutex_unlock(&adev->dm.dc_lock);
1970 }
1971 
1972 static int dm_resume(void *handle)
1973 {
1974 	struct amdgpu_device *adev = handle;
1975 	struct drm_device *ddev = adev_to_drm(adev);
1976 	struct amdgpu_display_manager *dm = &adev->dm;
1977 	struct amdgpu_dm_connector *aconnector;
1978 	struct drm_connector *connector;
1979 	struct drm_connector_list_iter iter;
1980 	struct drm_crtc *crtc;
1981 	struct drm_crtc_state *new_crtc_state;
1982 	struct dm_crtc_state *dm_new_crtc_state;
1983 	struct drm_plane *plane;
1984 	struct drm_plane_state *new_plane_state;
1985 	struct dm_plane_state *dm_new_plane_state;
1986 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1987 	enum dc_connection_type new_connection_type = dc_connection_none;
1988 	struct dc_state *dc_state;
1989 	int i, r, j;
1990 
1991 	if (amdgpu_in_reset(adev)) {
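		/* dm->dc_lock is still held here; it was taken in dm_suspend(). */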
1992 		dc_state = dm->cached_dc_state;
1993 
1994 		r = dm_dmub_hw_init(adev);
1995 		if (r)
1996 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1997 
1998 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1999 		dc_resume(dm->dc);
2000 
2001 		amdgpu_dm_irq_resume_early(adev);
2002 
2003 		for (i = 0; i < dc_state->stream_count; i++) {
2004 			dc_state->streams[i]->mode_changed = true;
2005 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2006 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2007 					= 0xffffffff;
2008 			}
2009 		}
2010 
2011 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2012 
2013 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2014 
2015 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2016 
2017 		dc_release_state(dm->cached_dc_state);
2018 		dm->cached_dc_state = NULL;
2019 
2020 		amdgpu_dm_irq_resume_late(adev);
2021 
2022 		mutex_unlock(&dm->dc_lock);
2023 
2024 		return 0;
2025 	}
2026 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2027 	dc_release_state(dm_state->context);
2028 	dm_state->context = dc_create_state(dm->dc);
2029 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2030 	dc_resource_state_construct(dm->dc, dm_state->context);
2031 
2032 	/* Before powering on DC we need to re-initialize DMUB. */
2033 	r = dm_dmub_hw_init(adev);
2034 	if (r)
2035 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2036 
2037 	/* power on hardware */
2038 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2039 
2040 	/* program HPD filter */
2041 	dc_resume(dm->dc);
2042 
2043 	/*
2044 	 * Enable HPD Rx IRQ early, before the mode set, as short-pulse
2045 	 * interrupts are used for MST
2046 	 */
2047 	amdgpu_dm_irq_resume_early(adev);
2048 
2049 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2050 	s3_handle_mst(ddev, false);
2051 
2052 	/* Do detection */
2053 	drm_connector_list_iter_begin(ddev, &iter);
2054 	drm_for_each_connector_iter(connector, &iter) {
2055 		aconnector = to_amdgpu_dm_connector(connector);
2056 
2057 		/*
2058 		 * this is the case when traversing through already created
2059 		 * MST connectors; they should be skipped
2060 		 */
2061 		if (aconnector->mst_port)
2062 			continue;
2063 
2064 		mutex_lock(&aconnector->hpd_lock);
2065 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2066 			DRM_ERROR("KMS: Failed to detect connector\n");
2067 
2068 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2069 			emulated_link_detect(aconnector->dc_link);
2070 		else
2071 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2072 
2073 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2074 			aconnector->fake_enable = false;
2075 
2076 		if (aconnector->dc_sink)
2077 			dc_sink_release(aconnector->dc_sink);
2078 		aconnector->dc_sink = NULL;
2079 		amdgpu_dm_update_connector_after_detect(aconnector);
2080 		mutex_unlock(&aconnector->hpd_lock);
2081 	}
2082 	drm_connector_list_iter_end(&iter);
2083 
2084 	/* Force mode set in atomic commit */
2085 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2086 		new_crtc_state->active_changed = true;
2087 
2088 	/*
2089 	 * atomic_check is expected to create the dc states. We need to release
2090 	 * them here, since they were duplicated as part of the suspend
2091 	 * procedure.
2092 	 */
2093 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2094 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2095 		if (dm_new_crtc_state->stream) {
2096 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2097 			dc_stream_release(dm_new_crtc_state->stream);
2098 			dm_new_crtc_state->stream = NULL;
2099 		}
2100 	}
2101 
2102 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2103 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2104 		if (dm_new_plane_state->dc_state) {
2105 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2106 			dc_plane_state_release(dm_new_plane_state->dc_state);
2107 			dm_new_plane_state->dc_state = NULL;
2108 		}
2109 	}
2110 
2111 	drm_atomic_helper_resume(ddev, dm->cached_state);
2112 
2113 	dm->cached_state = NULL;
2114 
2115 	amdgpu_dm_irq_resume_late(adev);
2116 
2117 	amdgpu_dm_smu_write_watermarks_table(adev);
2118 
2119 	return 0;
2120 }
2121 
2122 /**
2123  * DOC: DM Lifecycle
2124  *
2125  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2126  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2127  * the base driver's device list to be initialized and torn down accordingly.
2128  *
2129  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2130  */
2131 
2132 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2133 	.name = "dm",
2134 	.early_init = dm_early_init,
2135 	.late_init = dm_late_init,
2136 	.sw_init = dm_sw_init,
2137 	.sw_fini = dm_sw_fini,
2138 	.hw_init = dm_hw_init,
2139 	.hw_fini = dm_hw_fini,
2140 	.suspend = dm_suspend,
2141 	.resume = dm_resume,
2142 	.is_idle = dm_is_idle,
2143 	.wait_for_idle = dm_wait_for_idle,
2144 	.check_soft_reset = dm_check_soft_reset,
2145 	.soft_reset = dm_soft_reset,
2146 	.set_clockgating_state = dm_set_clockgating_state,
2147 	.set_powergating_state = dm_set_powergating_state,
2148 };
2149 
2150 const struct amdgpu_ip_block_version dm_ip_block = {
2152 	.type = AMD_IP_BLOCK_TYPE_DCE,
2153 	.major = 1,
2154 	.minor = 0,
2155 	.rev = 0,
2156 	.funcs = &amdgpu_dm_funcs,
2157 };
2158 
2160 /**
2161  * DOC: atomic
2162  *
2163  * *WIP*
2164  */
2165 
2166 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2167 	.fb_create = amdgpu_display_user_framebuffer_create,
2168 	.get_format_info = amd_get_format_info,
2169 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2170 	.atomic_check = amdgpu_dm_atomic_check,
2171 	.atomic_commit = drm_atomic_helper_commit,
2172 };
2173 
2174 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2175 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2176 };
2177 
2178 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2179 {
2180 	u32 max_cll, min_cll, max, min, q, r;
2181 	struct amdgpu_dm_backlight_caps *caps;
2182 	struct amdgpu_display_manager *dm;
2183 	struct drm_connector *conn_base;
2184 	struct amdgpu_device *adev;
2185 	struct dc_link *link = NULL;
2186 	static const u8 pre_computed_values[] = {
2187 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2188 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2189 
2190 	if (!aconnector || !aconnector->dc_link)
2191 		return;
2192 
2193 	link = aconnector->dc_link;
2194 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2195 		return;
2196 
2197 	conn_base = &aconnector->base;
2198 	adev = drm_to_adev(conn_base->dev);
2199 	dm = &adev->dm;
2200 	caps = &dm->backlight_caps;
2201 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2202 	caps->aux_support = false;
2203 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2204 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2205 
2206 	if (caps->ext_caps->bits.oled == 1 ||
2207 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2208 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2209 		caps->aux_support = true;
2210 
2211 	/* From the specification (CTA-861-G), the maximum luminance is
2212 	 * calculated as:
2213 	 *	Luminance = 50*2**(CV/32)
2214 	 * where CV is a one-byte value.
2215 	 * Evaluating this expression directly would require floating point
2216 	 * precision; to avoid that complexity, we take advantage of the fact
2217 	 * that CV is divided by a constant. From Euclid's division algorithm,
2218 	 * CV can be written as CV = 32*q + r. Substituting this into the
2219 	 * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2220 	 * to pre-compute 50*2**(r/32) for each r. The values were
2221 	 * pre-computed with the following Ruby line:
2222 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2223 	 * and the results can be verified against pre_computed_values.
2224 	 */
2226 	q = max_cll >> 5;
2227 	r = max_cll % 32;
2228 	max = (1 << q) * pre_computed_values[r];
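	/*
	 * Worked example: max_cll = 65 gives q = 2, r = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204 nits,
	 * matching 50*2**(65/32) ~= 204.4.
	 */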
2229 
2230 	/* min luminance: maxLum * (CV/255)^2 / 100 */
2231 	min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2233 
2234 	caps->aux_max_input_signal = max;
2235 	caps->aux_min_input_signal = min;
2236 }
2237 
2238 void amdgpu_dm_update_connector_after_detect(
2239 		struct amdgpu_dm_connector *aconnector)
2240 {
2241 	struct drm_connector *connector = &aconnector->base;
2242 	struct drm_device *dev = connector->dev;
2243 	struct dc_sink *sink;
2244 
2245 	/* MST handled by drm_mst framework */
2246 	if (aconnector->mst_mgr.mst_state == true)
2247 		return;
2248 
2249 	sink = aconnector->dc_link->local_sink;
2250 	if (sink)
2251 		dc_sink_retain(sink);
2252 
2253 	/*
2254 	 * EDID mgmt connector gets its first update only in the mode_valid hook,
2255 	 * and then the connector sink is set to either a fake or a physical sink,
2256 	 * depending on link status. Skip if already done during boot.
2257 	 */
2258 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2259 			&& aconnector->dc_em_sink) {
2260 
2261 		/*
2262 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to
2263 		 * fake the stream, because on resume connector->sink is set to NULL
2264 		 */
2265 		mutex_lock(&dev->mode_config.mutex);
2266 
2267 		if (sink) {
2268 			if (aconnector->dc_sink) {
2269 				amdgpu_dm_update_freesync_caps(connector, NULL);
2270 				/*
2271 				 * retain and release below bump up the sink's refcount:
2272 				 * the link no longer points to it after disconnect, so
2273 				 * on the next CRTC-to-connector reshuffle by userspace
2274 				 * we would otherwise get an unwanted dc_sink release
2275 				 */
2276 				dc_sink_release(aconnector->dc_sink);
2277 			}
2278 			aconnector->dc_sink = sink;
2279 			dc_sink_retain(aconnector->dc_sink);
2280 			amdgpu_dm_update_freesync_caps(connector,
2281 					aconnector->edid);
2282 		} else {
2283 			amdgpu_dm_update_freesync_caps(connector, NULL);
2284 			if (!aconnector->dc_sink) {
2285 				aconnector->dc_sink = aconnector->dc_em_sink;
2286 				dc_sink_retain(aconnector->dc_sink);
2287 			}
2288 		}
2289 
2290 		mutex_unlock(&dev->mode_config.mutex);
2291 
2292 		if (sink)
2293 			dc_sink_release(sink);
2294 		return;
2295 	}
2296 
2297 	/*
2298 	 * TODO: temporary guard while looking for a proper fix;
2299 	 * if this sink is an MST sink, we should not do anything
2300 	 */
2301 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2302 		dc_sink_release(sink);
2303 		return;
2304 	}
2305 
2306 	if (aconnector->dc_sink == sink) {
2307 		/*
2308 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2309 		 * Do nothing!!
2310 		 */
2311 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2312 				aconnector->connector_id);
2313 		if (sink)
2314 			dc_sink_release(sink);
2315 		return;
2316 	}
2317 
2318 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2319 		aconnector->connector_id, aconnector->dc_sink, sink);
2320 
2321 	mutex_lock(&dev->mode_config.mutex);
2322 
2323 	/*
2324 	 * 1. Update status of the drm connector
2325 	 * 2. Send an event and let userspace tell us what to do
2326 	 */
2327 	if (sink) {
2328 		/*
2329 		 * TODO: check if we still need the S3 mode update workaround.
2330 		 * If yes, put it here.
2331 		 */
2332 		if (aconnector->dc_sink) {
2333 			amdgpu_dm_update_freesync_caps(connector, NULL);
2334 			dc_sink_release(aconnector->dc_sink);
2335 		}
2336 
2337 		aconnector->dc_sink = sink;
2338 		dc_sink_retain(aconnector->dc_sink);
2339 		if (sink->dc_edid.length == 0) {
2340 			aconnector->edid = NULL;
2341 			if (aconnector->dc_link->aux_mode) {
2342 				drm_dp_cec_unset_edid(
2343 					&aconnector->dm_dp_aux.aux);
2344 			}
2345 		} else {
2346 			aconnector->edid =
2347 				(struct edid *)sink->dc_edid.raw_edid;
2348 
2349 			drm_connector_update_edid_property(connector,
2350 							   aconnector->edid);
2351 			if (aconnector->dc_link->aux_mode)
2352 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2353 						    aconnector->edid);
2354 		}
2355 
2356 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2357 		update_connector_ext_caps(aconnector);
2358 	} else {
2359 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2360 		amdgpu_dm_update_freesync_caps(connector, NULL);
2361 		drm_connector_update_edid_property(connector, NULL);
2362 		aconnector->num_modes = 0;
2363 		dc_sink_release(aconnector->dc_sink);
2364 		aconnector->dc_sink = NULL;
2365 		aconnector->edid = NULL;
2366 #ifdef CONFIG_DRM_AMD_DC_HDCP
2367 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2368 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2369 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2370 #endif
2371 	}
2372 
2373 	mutex_unlock(&dev->mode_config.mutex);
2374 
2375 	update_subconnector_property(aconnector);
2376 
2377 	if (sink)
2378 		dc_sink_release(sink);
2379 }
2380 
2381 static void handle_hpd_irq(void *param)
2382 {
2383 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2384 	struct drm_connector *connector = &aconnector->base;
2385 	struct drm_device *dev = connector->dev;
2386 	enum dc_connection_type new_connection_type = dc_connection_none;
2387 #ifdef CONFIG_DRM_AMD_DC_HDCP
2388 	struct amdgpu_device *adev = drm_to_adev(dev);
2389 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2390 #endif
2391 
2392 	/*
2393 	 * In case of failure or MST there is no need to update the connector status
2394 	 * or notify the OS, since (in the MST case) MST does this in its own context.
2395 	 */
2396 	mutex_lock(&aconnector->hpd_lock);
2397 
2398 #ifdef CONFIG_DRM_AMD_DC_HDCP
2399 	if (adev->dm.hdcp_workqueue) {
2400 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2401 		dm_con_state->update_hdcp = true;
2402 	}
2403 #endif
2404 	if (aconnector->fake_enable)
2405 		aconnector->fake_enable = false;
2406 
2407 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2408 		DRM_ERROR("KMS: Failed to detect connector\n");
2409 
2410 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2411 		emulated_link_detect(aconnector->dc_link);
2412 
2414 		drm_modeset_lock_all(dev);
2415 		dm_restore_drm_connector_state(dev, connector);
2416 		drm_modeset_unlock_all(dev);
2417 
2418 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2419 			drm_kms_helper_hotplug_event(dev);
2420 
2421 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2422 		if (new_connection_type == dc_connection_none &&
2423 		    aconnector->dc_link->type == dc_connection_none)
2424 			dm_set_dpms_off(aconnector->dc_link);
2425 
2426 		amdgpu_dm_update_connector_after_detect(aconnector);
2427 
2428 		drm_modeset_lock_all(dev);
2429 		dm_restore_drm_connector_state(dev, connector);
2430 		drm_modeset_unlock_all(dev);
2431 
2432 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2433 			drm_kms_helper_hotplug_event(dev);
2434 	}
2435 	mutex_unlock(&aconnector->hpd_lock);
2436 
2437 }
2438 
2439 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2440 {
2441 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2442 	uint8_t dret;
2443 	bool new_irq_handled = false;
2444 	int dpcd_addr;
2445 	int dpcd_bytes_to_read;
2446 
2447 	const int max_process_count = 30;
2448 	int process_count = 0;
2449 
2450 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2451 
2452 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2453 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2454 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2455 		dpcd_addr = DP_SINK_COUNT;
2456 	} else {
2457 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2458 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2459 		dpcd_addr = DP_SINK_COUNT_ESI;
2460 	}
2461 
2462 	dret = drm_dp_dpcd_read(
2463 		&aconnector->dm_dp_aux.aux,
2464 		dpcd_addr,
2465 		esi,
2466 		dpcd_bytes_to_read);
2467 
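	/*
	 * Keep handling and ACKing MST sideband IRQs until the sink stops
	 * reporting new ones, or the retry budget runs out.
	 */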
2468 	while (dret == dpcd_bytes_to_read &&
2469 		process_count < max_process_count) {
2470 		uint8_t retry;
2471 		dret = 0;
2472 
2473 		process_count++;
2474 
2475 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2476 		/* handle HPD short pulse irq */
2477 		if (aconnector->mst_mgr.mst_state)
2478 			drm_dp_mst_hpd_irq(
2479 				&aconnector->mst_mgr,
2480 				esi,
2481 				&new_irq_handled);
2482 
2483 		if (new_irq_handled) {
2484 			/* ACK at DPCD to notify downstream */
2485 			const int ack_dpcd_bytes_to_write =
2486 				dpcd_bytes_to_read - 1;
2487 
2488 			for (retry = 0; retry < 3; retry++) {
2489 				uint8_t wret;
2490 
2491 				wret = drm_dp_dpcd_write(
2492 					&aconnector->dm_dp_aux.aux,
2493 					dpcd_addr + 1,
2494 					&esi[1],
2495 					ack_dpcd_bytes_to_write);
2496 				if (wret == ack_dpcd_bytes_to_write)
2497 					break;
2498 			}
2499 
2500 			/* check if there is new irq to be handled */
2501 			dret = drm_dp_dpcd_read(
2502 				&aconnector->dm_dp_aux.aux,
2503 				dpcd_addr,
2504 				esi,
2505 				dpcd_bytes_to_read);
2506 
2507 			new_irq_handled = false;
2508 		} else {
2509 			break;
2510 		}
2511 	}
2512 
2513 	if (process_count == max_process_count)
2514 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2515 }
2516 
2517 static void handle_hpd_rx_irq(void *param)
2518 {
2519 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2520 	struct drm_connector *connector = &aconnector->base;
2521 	struct drm_device *dev = connector->dev;
2522 	struct dc_link *dc_link = aconnector->dc_link;
2523 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2524 	bool result = false;
2525 	enum dc_connection_type new_connection_type = dc_connection_none;
2526 	struct amdgpu_device *adev = drm_to_adev(dev);
2527 	union hpd_irq_data hpd_irq_data;
2528 
2529 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2530 
2531 	/*
2532 	 * TODO: Temporarily hold this mutex so the HPD interrupt does not
2533 	 * conflict over the GPIO; once an i2c helper is implemented, this
2534 	 * mutex should be retired.
2535 	 */
2536 	if (dc_link->type != dc_connection_mst_branch)
2537 		mutex_lock(&aconnector->hpd_lock);
2538 
2539 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2540 
2541 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2542 		(dc_link->type == dc_connection_mst_branch)) {
2543 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2544 			result = true;
2545 			dm_handle_hpd_rx_irq(aconnector);
2546 			goto out;
2547 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2548 			result = false;
2549 			dm_handle_hpd_rx_irq(aconnector);
2550 			goto out;
2551 		}
2552 	}
2553 
2554 	mutex_lock(&adev->dm.dc_lock);
2555 #ifdef CONFIG_DRM_AMD_DC_HDCP
2556 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2557 #else
2558 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2559 #endif
2560 	mutex_unlock(&adev->dm.dc_lock);
2561 
2562 out:
2563 	if (result && !is_mst_root_connector) {
2564 		/* Downstream Port status changed. */
2565 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2566 			DRM_ERROR("KMS: Failed to detect connector\n");
2567 
2568 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2569 			emulated_link_detect(dc_link);
2570 
2571 			if (aconnector->fake_enable)
2572 				aconnector->fake_enable = false;
2573 
2574 			amdgpu_dm_update_connector_after_detect(aconnector);
2575 
2577 			drm_modeset_lock_all(dev);
2578 			dm_restore_drm_connector_state(dev, connector);
2579 			drm_modeset_unlock_all(dev);
2580 
2581 			drm_kms_helper_hotplug_event(dev);
2582 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2583 
2584 			if (aconnector->fake_enable)
2585 				aconnector->fake_enable = false;
2586 
2587 			amdgpu_dm_update_connector_after_detect(aconnector);
2588 
2590 			drm_modeset_lock_all(dev);
2591 			dm_restore_drm_connector_state(dev, connector);
2592 			drm_modeset_unlock_all(dev);
2593 
2594 			drm_kms_helper_hotplug_event(dev);
2595 		}
2596 	}
2597 #ifdef CONFIG_DRM_AMD_DC_HDCP
2598 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2599 		if (adev->dm.hdcp_workqueue)
2600 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2601 	}
2602 #endif
2603 
2604 	if (dc_link->type != dc_connection_mst_branch) {
2605 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2606 		mutex_unlock(&aconnector->hpd_lock);
2607 	}
2608 }
2609 
2610 static void register_hpd_handlers(struct amdgpu_device *adev)
2611 {
2612 	struct drm_device *dev = adev_to_drm(adev);
2613 	struct drm_connector *connector;
2614 	struct amdgpu_dm_connector *aconnector;
2615 	const struct dc_link *dc_link;
2616 	struct dc_interrupt_params int_params = {0};
2617 
2618 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2619 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2620 
2621 	list_for_each_entry(connector,
2622 			&dev->mode_config.connector_list, head)	{
2623 
2624 		aconnector = to_amdgpu_dm_connector(connector);
2625 		dc_link = aconnector->dc_link;
2626 
2627 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2628 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2629 			int_params.irq_source = dc_link->irq_source_hpd;
2630 
2631 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2632 					handle_hpd_irq,
2633 					(void *) aconnector);
2634 		}
2635 
2636 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2637 
2638 			/* Also register for DP short pulse (hpd_rx). */
2639 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2640 			int_params.irq_source = dc_link->irq_source_hpd_rx;
2641 
2642 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2643 					handle_hpd_rx_irq,
2644 					(void *) aconnector);
2645 		}
2646 	}
2647 }
2648 
2649 #if defined(CONFIG_DRM_AMD_DC_SI)
2650 /* Register IRQ sources and initialize IRQ callbacks */
2651 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2652 {
2653 	struct dc *dc = adev->dm.dc;
2654 	struct common_irq_params *c_irq_params;
2655 	struct dc_interrupt_params int_params = {0};
2656 	int r;
2657 	int i;
2658 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2659 
2660 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2661 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2662 
2663 	/*
2664 	 * Actions of amdgpu_irq_add_id():
2665 	 * 1. Register a set() function with base driver.
2666 	 *    Base driver will call set() function to enable/disable an
2667 	 *    interrupt in DC hardware.
2668 	 * 2. Register amdgpu_dm_irq_handler().
2669 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2670 	 *    coming from DC hardware.
2671 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2672 	 *    for acknowledging and handling.
	 */
2673 
2674 	/* Use VBLANK interrupt */
2675 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2676 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2677 		if (r) {
2678 			DRM_ERROR("Failed to add crtc irq id!\n");
2679 			return r;
2680 		}
2681 
2682 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2683 		int_params.irq_source =
2684 			dc_interrupt_to_irq_source(dc, i + 1, 0);
2685 
2686 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2687 
2688 		c_irq_params->adev = adev;
2689 		c_irq_params->irq_src = int_params.irq_source;
2690 
2691 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2692 				dm_crtc_high_irq, c_irq_params);
2693 	}
2694 
2695 	/* Use GRPH_PFLIP interrupt */
2696 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2697 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2698 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2699 		if (r) {
2700 			DRM_ERROR("Failed to add page flip irq id!\n");
2701 			return r;
2702 		}
2703 
2704 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2705 		int_params.irq_source =
2706 			dc_interrupt_to_irq_source(dc, i, 0);
2707 
2708 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2709 
2710 		c_irq_params->adev = adev;
2711 		c_irq_params->irq_src = int_params.irq_source;
2712 
2713 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2714 				dm_pflip_high_irq, c_irq_params);
2715 
2716 	}
2717 
2718 	/* HPD */
2719 	r = amdgpu_irq_add_id(adev, client_id,
2720 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2721 	if (r) {
2722 		DRM_ERROR("Failed to add hpd irq id!\n");
2723 		return r;
2724 	}
2725 
2726 	register_hpd_handlers(adev);
2727 
2728 	return 0;
2729 }
2730 #endif
2731 
2732 /* Register IRQ sources and initialize IRQ callbacks */
2733 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2734 {
2735 	struct dc *dc = adev->dm.dc;
2736 	struct common_irq_params *c_irq_params;
2737 	struct dc_interrupt_params int_params = {0};
2738 	int r;
2739 	int i;
2740 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2741 
2742 	if (adev->asic_type >= CHIP_VEGA10)
2743 		client_id = SOC15_IH_CLIENTID_DCE;
2744 
2745 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2746 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2747 
2748 	/*
2749 	 * Actions of amdgpu_irq_add_id():
2750 	 * 1. Register a set() function with base driver.
2751 	 *    Base driver will call set() function to enable/disable an
2752 	 *    interrupt in DC hardware.
2753 	 * 2. Register amdgpu_dm_irq_handler().
2754 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2755 	 *    coming from DC hardware.
2756 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2757 	 *    for acknowledging and handling.
	 */
2758 
2759 	/* Use VBLANK interrupt */
2760 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2761 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2762 		if (r) {
2763 			DRM_ERROR("Failed to add crtc irq id!\n");
2764 			return r;
2765 		}
2766 
2767 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2768 		int_params.irq_source =
2769 			dc_interrupt_to_irq_source(dc, i, 0);
2770 
2771 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2772 
2773 		c_irq_params->adev = adev;
2774 		c_irq_params->irq_src = int_params.irq_source;
2775 
2776 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2777 				dm_crtc_high_irq, c_irq_params);
2778 	}
2779 
2780 	/* Use VUPDATE interrupt */
2781 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2782 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2783 		if (r) {
2784 			DRM_ERROR("Failed to add vupdate irq id!\n");
2785 			return r;
2786 		}
2787 
2788 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2789 		int_params.irq_source =
2790 			dc_interrupt_to_irq_source(dc, i, 0);
2791 
2792 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2793 
2794 		c_irq_params->adev = adev;
2795 		c_irq_params->irq_src = int_params.irq_source;
2796 
2797 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2798 				dm_vupdate_high_irq, c_irq_params);
2799 	}
2800 
2801 	/* Use GRPH_PFLIP interrupt */
2802 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2803 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2804 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2805 		if (r) {
2806 			DRM_ERROR("Failed to add page flip irq id!\n");
2807 			return r;
2808 		}
2809 
2810 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2811 		int_params.irq_source =
2812 			dc_interrupt_to_irq_source(dc, i, 0);
2813 
2814 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2815 
2816 		c_irq_params->adev = adev;
2817 		c_irq_params->irq_src = int_params.irq_source;
2818 
2819 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2820 				dm_pflip_high_irq, c_irq_params);
2821 
2822 	}
2823 
2824 	/* HPD */
2825 	r = amdgpu_irq_add_id(adev, client_id,
2826 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2827 	if (r) {
2828 		DRM_ERROR("Failed to add hpd irq id!\n");
2829 		return r;
2830 	}
2831 
2832 	register_hpd_handlers(adev);
2833 
2834 	return 0;
2835 }
2836 
2837 #if defined(CONFIG_DRM_AMD_DC_DCN)
2838 /* Register IRQ sources and initialize IRQ callbacks */
2839 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2840 {
2841 	struct dc *dc = adev->dm.dc;
2842 	struct common_irq_params *c_irq_params;
2843 	struct dc_interrupt_params int_params = {0};
2844 	int r;
2845 	int i;
2846 
2847 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2848 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2849 
2850 	/*
2851 	 * Actions of amdgpu_irq_add_id():
2852 	 * 1. Register a set() function with base driver.
2853 	 *    Base driver will call set() function to enable/disable an
2854 	 *    interrupt in DC hardware.
2855 	 * 2. Register amdgpu_dm_irq_handler().
2856 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2857 	 *    coming from DC hardware.
2858 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2859 	 *    for acknowledging and handling.
2860 	 */
2861 
2862 	/* Use VSTARTUP interrupt */
2863 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2864 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2865 			i++) {
2866 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2867 
2868 		if (r) {
2869 			DRM_ERROR("Failed to add crtc irq id!\n");
2870 			return r;
2871 		}
2872 
2873 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2874 		int_params.irq_source =
2875 			dc_interrupt_to_irq_source(dc, i, 0);
2876 
2877 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2878 
2879 		c_irq_params->adev = adev;
2880 		c_irq_params->irq_src = int_params.irq_source;
2881 
2882 		amdgpu_dm_irq_register_interrupt(
2883 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2884 	}
2885 
2886 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2887 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2888 	 * to trigger at end of each vblank, regardless of state of the lock,
2889 	 * matching DCE behaviour.
2890 	 */
2891 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2892 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2893 	     i++) {
2894 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2895 
2896 		if (r) {
2897 			DRM_ERROR("Failed to add vupdate irq id!\n");
2898 			return r;
2899 		}
2900 
2901 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2902 		int_params.irq_source =
2903 			dc_interrupt_to_irq_source(dc, i, 0);
2904 
2905 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2906 
2907 		c_irq_params->adev = adev;
2908 		c_irq_params->irq_src = int_params.irq_source;
2909 
2910 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2911 				dm_vupdate_high_irq, c_irq_params);
2912 	}
2913 
2914 	/* Use GRPH_PFLIP interrupt */
2915 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2916 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2917 			i++) {
2918 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2919 		if (r) {
2920 			DRM_ERROR("Failed to add page flip irq id!\n");
2921 			return r;
2922 		}
2923 
2924 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2925 		int_params.irq_source =
2926 			dc_interrupt_to_irq_source(dc, i, 0);
2927 
2928 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2929 
2930 		c_irq_params->adev = adev;
2931 		c_irq_params->irq_src = int_params.irq_source;
2932 
2933 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2934 				dm_pflip_high_irq, c_irq_params);
2935 
2936 	}
2937 
2938 	/* HPD */
2939 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2940 			&adev->hpd_irq);
2941 	if (r) {
2942 		DRM_ERROR("Failed to add hpd irq id!\n");
2943 		return r;
2944 	}
2945 
2946 	register_hpd_handlers(adev);
2947 
2948 	return 0;
2949 }
2950 #endif
2951 
2952 /*
2953  * Acquires the lock for the atomic state object and returns
2954  * the new atomic state.
2955  *
2956  * This should only be called during atomic check.
2957  */
2958 static int dm_atomic_get_state(struct drm_atomic_state *state,
2959 			       struct dm_atomic_state **dm_state)
2960 {
2961 	struct drm_device *dev = state->dev;
2962 	struct amdgpu_device *adev = drm_to_adev(dev);
2963 	struct amdgpu_display_manager *dm = &adev->dm;
2964 	struct drm_private_state *priv_state;
2965 
2966 	if (*dm_state)
2967 		return 0;
2968 
2969 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2970 	if (IS_ERR(priv_state))
2971 		return PTR_ERR(priv_state);
2972 
2973 	*dm_state = to_dm_atomic_state(priv_state);
2974 
2975 	return 0;
2976 }
2977 
2978 static struct dm_atomic_state *
2979 dm_atomic_get_new_state(struct drm_atomic_state *state)
2980 {
2981 	struct drm_device *dev = state->dev;
2982 	struct amdgpu_device *adev = drm_to_adev(dev);
2983 	struct amdgpu_display_manager *dm = &adev->dm;
2984 	struct drm_private_obj *obj;
2985 	struct drm_private_state *new_obj_state;
2986 	int i;
2987 
2988 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2989 		if (obj->funcs == dm->atomic_obj.funcs)
2990 			return to_dm_atomic_state(new_obj_state);
2991 	}
2992 
2993 	return NULL;
2994 }
2995 
2996 static struct drm_private_state *
2997 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2998 {
2999 	struct dm_atomic_state *old_state, *new_state;
3000 
3001 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3002 	if (!new_state)
3003 		return NULL;
3004 
3005 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3006 
3007 	old_state = to_dm_atomic_state(obj->state);
3008 
3009 	if (old_state && old_state->context)
3010 		new_state->context = dc_copy_state(old_state->context);
3011 
3012 	if (!new_state->context) {
3013 		kfree(new_state);
3014 		return NULL;
3015 	}
3016 
3017 	return &new_state->base;
3018 }
3019 
3020 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3021 				    struct drm_private_state *state)
3022 {
3023 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3024 
3025 	if (dm_state && dm_state->context)
3026 		dc_release_state(dm_state->context);
3027 
3028 	kfree(dm_state);
3029 }
3030 
3031 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3032 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3033 	.atomic_destroy_state = dm_atomic_destroy_state,
3034 };
3035 
3036 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3037 {
3038 	struct dm_atomic_state *state;
3039 	int r;
3040 
3041 	adev->mode_info.mode_config_initialized = true;
3042 
3043 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3044 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3045 
3046 	adev_to_drm(adev)->mode_config.max_width = 16384;
3047 	adev_to_drm(adev)->mode_config.max_height = 16384;
3048 
3049 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3050 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3051 	/* indicates support for immediate flip */
3052 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3053 
3054 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3055 
3056 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3057 	if (!state)
3058 		return -ENOMEM;
3059 
3060 	state->context = dc_create_state(adev->dm.dc);
3061 	if (!state->context) {
3062 		kfree(state);
3063 		return -ENOMEM;
3064 	}
3065 
3066 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3067 
3068 	drm_atomic_private_obj_init(adev_to_drm(adev),
3069 				    &adev->dm.atomic_obj,
3070 				    &state->base,
3071 				    &dm_atomic_state_funcs);
3072 
3073 	r = amdgpu_display_modeset_create_props(adev);
3074 	if (r) {
3075 		dc_release_state(state->context);
3076 		kfree(state);
3077 		return r;
3078 	}
3079 
3080 	r = amdgpu_dm_audio_init(adev);
3081 	if (r) {
3082 		dc_release_state(state->context);
3083 		kfree(state);
3084 		return r;
3085 	}
3086 
3087 	return 0;
3088 }
3089 
3090 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3091 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3092 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3093 
3094 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3095 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3096 
3097 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3098 {
3099 #if defined(CONFIG_ACPI)
3100 	struct amdgpu_dm_backlight_caps caps;
3101 
3102 	memset(&caps, 0, sizeof(caps));
3103 
3104 	if (dm->backlight_caps.caps_valid)
3105 		return;
3106 
3107 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3108 	if (caps.caps_valid) {
3109 		dm->backlight_caps.caps_valid = true;
3110 		if (caps.aux_support)
3111 			return;
3112 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3113 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3114 	} else {
3115 		dm->backlight_caps.min_input_signal =
3116 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3117 		dm->backlight_caps.max_input_signal =
3118 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3119 	}
3120 #else
3121 	if (dm->backlight_caps.aux_support)
3122 		return;
3123 
3124 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3125 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3126 #endif
3127 }
3128 
3129 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3130 {
3131 	bool rc;
3132 
3133 	if (!link)
3134 		return 1;
3135 
3136 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3137 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3138 
3139 	return rc ? 0 : 1;
3140 }
3141 
3142 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3143 				unsigned *min, unsigned *max)
3144 {
3145 	if (!caps)
3146 		return 0;
3147 
3148 	if (caps->aux_support) {
3149 		// Firmware limits are in nits, DC API wants millinits.
3150 		*max = 1000 * caps->aux_max_input_signal;
3151 		*min = 1000 * caps->aux_min_input_signal;
3152 	} else {
3153 		// Firmware limits are 8-bit, PWM control is 16-bit.
3154 		*max = 0x101 * caps->max_input_signal;
3155 		*min = 0x101 * caps->min_input_signal;
3156 	}
3157 	return 1;
3158 }
3159 
3160 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3161 					uint32_t brightness)
3162 {
3163 	unsigned min, max;
3164 
3165 	if (!get_brightness_range(caps, &min, &max))
3166 		return brightness;
3167 
3168 	// Rescale 0..255 to min..max
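	// E.g. with the default PWM limits (min_input_signal = 12,
	// max_input_signal = 255): min = 0x101 * 12 = 3084 and
	// max = 0x101 * 255 = 65535, so user level 0 maps to 3084 and
	// user level 255 (AMDGPU_MAX_BL_LEVEL) maps to 65535.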
3169 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3170 				       AMDGPU_MAX_BL_LEVEL);
3171 }
3172 
3173 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3174 				      uint32_t brightness)
3175 {
3176 	unsigned min, max;
3177 
3178 	if (!get_brightness_range(caps, &min, &max))
3179 		return brightness;
3180 
3181 	if (brightness < min)
3182 		return 0;
3183 	// Rescale min..max to 0..255
3184 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3185 				 max - min);
3186 }
3187 
3188 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3189 {
3190 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3191 	struct amdgpu_dm_backlight_caps caps;
3192 	struct dc_link *link = NULL;
3193 	u32 brightness;
3194 	bool rc;
3195 
3196 	amdgpu_dm_update_backlight_caps(dm);
3197 	caps = dm->backlight_caps;
3198 
3199 	link = (struct dc_link *)dm->backlight_link;
3200 
3201 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3202 	// Change brightness based on AUX property
3203 	if (caps.aux_support)
3204 		return set_backlight_via_aux(link, brightness);
3205 
3206 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3207 
3208 	return rc ? 0 : 1;
3209 }
3210 
3211 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3212 {
3213 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3214 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3215 
3216 	if (ret == DC_ERROR_UNEXPECTED)
3217 		return bd->props.brightness;
3218 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3219 }
3220 
3221 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3222 	.options = BL_CORE_SUSPENDRESUME,
3223 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3224 	.update_status	= amdgpu_dm_backlight_update_status,
3225 };
3226 
3227 static void
3228 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3229 {
3230 	char bl_name[16];
3231 	struct backlight_properties props = { 0 };
3232 
3233 	amdgpu_dm_update_backlight_caps(dm);
3234 
3235 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3236 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3237 	props.type = BACKLIGHT_RAW;
3238 
3239 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3240 		 adev_to_drm(dm->adev)->primary->index);
3241 
3242 	dm->backlight_dev = backlight_device_register(bl_name,
3243 						      adev_to_drm(dm->adev)->dev,
3244 						      dm,
3245 						      &amdgpu_dm_backlight_ops,
3246 						      &props);
3247 
3248 	if (IS_ERR(dm->backlight_dev))
3249 		DRM_ERROR("DM: Backlight registration failed!\n");
3250 	else
3251 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3252 }
3253 
3254 #endif
3255 
3256 static int initialize_plane(struct amdgpu_display_manager *dm,
3257 			    struct amdgpu_mode_info *mode_info, int plane_id,
3258 			    enum drm_plane_type plane_type,
3259 			    const struct dc_plane_cap *plane_cap)
3260 {
3261 	struct drm_plane *plane;
3262 	unsigned long possible_crtcs;
3263 	int ret = 0;
3264 
3265 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3266 	if (!plane) {
3267 		DRM_ERROR("KMS: Failed to allocate plane\n");
3268 		return -ENOMEM;
3269 	}
3270 	plane->type = plane_type;
3271 
3272 	/*
3273 	 * HACK: IGT tests expect that the primary plane for a CRTC
3274 	 * can only have one possible CRTC. Only expose support for
3275 	 * any CRTC for planes that are not going to be used as a CRTC's
3276 	 * primary plane - like overlay or underlay planes.
3277 	 */
3278 	possible_crtcs = 1 << plane_id;
3279 	if (plane_id >= dm->dc->caps.max_streams)
3280 		possible_crtcs = 0xff;
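	/*
	 * E.g. primary plane 0 gets possible_crtcs = 0x1 (its own CRTC only),
	 * while overlay planes get 0xff (any CRTC).
	 */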
3281 
3282 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3283 
3284 	if (ret) {
3285 		DRM_ERROR("KMS: Failed to initialize plane\n");
3286 		kfree(plane);
3287 		return ret;
3288 	}
3289 
3290 	if (mode_info)
3291 		mode_info->planes[plane_id] = plane;
3292 
3293 	return ret;
3294 }
3295 
3297 static void register_backlight_device(struct amdgpu_display_manager *dm,
3298 				      struct dc_link *link)
3299 {
3300 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3301 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3302 
3303 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3304 	    link->type != dc_connection_none) {
3305 		/*
3306 		 * Even if registration fails, we should continue with
3307 		 * DM initialization because not having a backlight control
3308 		 * is better than a black screen.
3309 		 */
3310 		amdgpu_dm_register_backlight_device(dm);
3311 
3312 		if (dm->backlight_dev)
3313 			dm->backlight_link = link;
3314 	}
3315 #endif
3316 }
3317 
3319 /*
3320  * In this architecture, the association
3321  * connector -> encoder -> crtc
3322  * is not really required. The crtc and connector will hold the
3323  * display_index as an abstraction to use with the DAL component.
3324  *
3325  * Returns 0 on success
3326  */
3327 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3328 {
3329 	struct amdgpu_display_manager *dm = &adev->dm;
3330 	int32_t i;
3331 	struct amdgpu_dm_connector *aconnector = NULL;
3332 	struct amdgpu_encoder *aencoder = NULL;
3333 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3334 	uint32_t link_cnt;
3335 	int32_t primary_planes;
3336 	enum dc_connection_type new_connection_type = dc_connection_none;
3337 	const struct dc_plane_cap *plane;
3338 
3339 	dm->display_indexes_num = dm->dc->caps.max_streams;
3340 	/* Update the actual number of CRTCs used */
3341 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3342 
3343 	link_cnt = dm->dc->caps.max_links;
3344 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3345 		DRM_ERROR("DM: Failed to initialize mode config\n");
3346 		return -EINVAL;
3347 	}
3348 
3349 	/* There is one primary plane per CRTC */
3350 	primary_planes = dm->dc->caps.max_streams;
3351 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3352 
3353 	/*
3354 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3355 	 * Order is reversed to match iteration order in atomic check.
3356 	 */
3357 	for (i = (primary_planes - 1); i >= 0; i--) {
3358 		plane = &dm->dc->caps.planes[i];
3359 
3360 		if (initialize_plane(dm, mode_info, i,
3361 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3362 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3363 			goto fail;
3364 		}
3365 	}
3366 
3367 	/*
3368 	 * Initialize overlay planes, index starting after primary planes.
3369 	 * These planes have a higher DRM index than the primary planes since
3370 	 * they should be considered as having a higher z-order.
3371 	 * Order is reversed to match iteration order in atomic check.
3372 	 *
3373 	 * Only support DCN for now, and only expose one so we don't encourage
3374 	 * userspace to use up all the pipes.
3375 	 */
3376 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3377 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3378 
3379 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3380 			continue;
3381 
3382 		if (!plane->blends_with_above || !plane->blends_with_below)
3383 			continue;
3384 
3385 		if (!plane->pixel_format_support.argb8888)
3386 			continue;
3387 
3388 		if (initialize_plane(dm, NULL, primary_planes + i,
3389 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3390 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3391 			goto fail;
3392 		}
3393 
3394 		/* Only create one overlay plane. */
3395 		break;
3396 	}
3397 
3398 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3399 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3400 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3401 			goto fail;
3402 		}
3403 
3404 	/* Loop over all connectors on the board */
3405 	for (i = 0; i < link_cnt; i++) {
3406 		struct dc_link *link = NULL;
3407 
3408 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3409 			DRM_ERROR(
3410 				"KMS: Cannot support more than %d display indexes\n",
3411 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3412 			continue;
3413 		}
3414 
3415 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3416 		if (!aconnector)
3417 			goto fail;
3418 
3419 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3420 		if (!aencoder)
3421 			goto fail;
3422 
3423 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3424 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3425 			goto fail;
3426 		}
3427 
3428 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3429 			DRM_ERROR("KMS: Failed to initialize connector\n");
3430 			goto fail;
3431 		}
3432 
3433 		link = dc_get_link_at_index(dm->dc, i);
3434 
3435 		if (!dc_link_detect_sink(link, &new_connection_type))
3436 			DRM_ERROR("KMS: Failed to detect connector\n");
3437 
3438 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3439 			emulated_link_detect(link);
3440 			amdgpu_dm_update_connector_after_detect(aconnector);
3441 
3442 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3443 			amdgpu_dm_update_connector_after_detect(aconnector);
3444 			register_backlight_device(dm, link);
3445 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3446 				amdgpu_dm_set_psr_caps(link);
3447 		}
3450 	}
3451 
3452 	/* Software is initialized. Now we can register interrupt handlers. */
3453 	switch (adev->asic_type) {
3454 #if defined(CONFIG_DRM_AMD_DC_SI)
3455 	case CHIP_TAHITI:
3456 	case CHIP_PITCAIRN:
3457 	case CHIP_VERDE:
3458 	case CHIP_OLAND:
3459 		if (dce60_register_irq_handlers(dm->adev)) {
3460 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3461 			goto fail;
3462 		}
3463 		break;
3464 #endif
3465 	case CHIP_BONAIRE:
3466 	case CHIP_HAWAII:
3467 	case CHIP_KAVERI:
3468 	case CHIP_KABINI:
3469 	case CHIP_MULLINS:
3470 	case CHIP_TONGA:
3471 	case CHIP_FIJI:
3472 	case CHIP_CARRIZO:
3473 	case CHIP_STONEY:
3474 	case CHIP_POLARIS11:
3475 	case CHIP_POLARIS10:
3476 	case CHIP_POLARIS12:
3477 	case CHIP_VEGAM:
3478 	case CHIP_VEGA10:
3479 	case CHIP_VEGA12:
3480 	case CHIP_VEGA20:
3481 		if (dce110_register_irq_handlers(dm->adev)) {
3482 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3483 			goto fail;
3484 		}
3485 		break;
3486 #if defined(CONFIG_DRM_AMD_DC_DCN)
3487 	case CHIP_RAVEN:
3488 	case CHIP_NAVI12:
3489 	case CHIP_NAVI10:
3490 	case CHIP_NAVI14:
3491 	case CHIP_RENOIR:
3492 	case CHIP_SIENNA_CICHLID:
3493 	case CHIP_NAVY_FLOUNDER:
3494 	case CHIP_DIMGREY_CAVEFISH:
3495 	case CHIP_VANGOGH:
3496 		if (dcn10_register_irq_handlers(dm->adev)) {
3497 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3498 			goto fail;
3499 		}
3500 		break;
3501 #endif
3502 	default:
3503 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3504 		goto fail;
3505 	}
3506 
3507 	return 0;
3508 fail:
3509 	kfree(aencoder);
3510 	kfree(aconnector);
3511 
3512 	return -EINVAL;
3513 }
3514 
3515 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3516 {
3517 	drm_mode_config_cleanup(dm->ddev);
3518 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3520 }
3521 
3522 /******************************************************************************
3523  * amdgpu_display_funcs functions
3524  *****************************************************************************/
3525 
3526 /*
3527  * dm_bandwidth_update - program display watermarks
3528  *
3529  * @adev: amdgpu_device pointer
3530  *
3531  * Calculate and program the display watermarks and line buffer allocation.
3532  */
3533 static void dm_bandwidth_update(struct amdgpu_device *adev)
3534 {
3535 	/* TODO: implement later */
3536 }
3537 
3538 static const struct amdgpu_display_funcs dm_display_funcs = {
3539 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3540 	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
3541 	.backlight_set_level = NULL, /* never called for DC */
3542 	.backlight_get_level = NULL, /* never called for DC */
3543 	.hpd_sense = NULL, /* called unconditionally */
3544 	.hpd_set_polarity = NULL, /* called unconditionally */
3545 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3546 	.page_flip_get_scanoutpos =
3547 		dm_crtc_get_scanoutpos, /* called unconditionally */
3548 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3549 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3550 };
3551 
3552 #if defined(CONFIG_DEBUG_KERNEL_DC)
3553 
3554 static ssize_t s3_debug_store(struct device *device,
3555 			      struct device_attribute *attr,
3556 			      const char *buf,
3557 			      size_t count)
3558 {
3559 	int ret;
3560 	int s3_state;
3561 	struct drm_device *drm_dev = dev_get_drvdata(device);
3562 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3563 
3564 	ret = kstrtoint(buf, 0, &s3_state);
3565 
3566 	if (ret == 0) {
3567 		if (s3_state) {
3568 			dm_resume(adev);
3569 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3570 		} else
3571 			dm_suspend(adev);
3572 	}
3573 
3574 	return ret == 0 ? count : 0;
3575 }
3576 
3577 DEVICE_ATTR_WO(s3_debug);
3578 
3579 #endif
3580 
3581 static int dm_early_init(void *handle)
3582 {
3583 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3584 
3585 	switch (adev->asic_type) {
3586 #if defined(CONFIG_DRM_AMD_DC_SI)
3587 	case CHIP_TAHITI:
3588 	case CHIP_PITCAIRN:
3589 	case CHIP_VERDE:
3590 		adev->mode_info.num_crtc = 6;
3591 		adev->mode_info.num_hpd = 6;
3592 		adev->mode_info.num_dig = 6;
3593 		break;
3594 	case CHIP_OLAND:
3595 		adev->mode_info.num_crtc = 2;
3596 		adev->mode_info.num_hpd = 2;
3597 		adev->mode_info.num_dig = 2;
3598 		break;
3599 #endif
3600 	case CHIP_BONAIRE:
3601 	case CHIP_HAWAII:
3602 		adev->mode_info.num_crtc = 6;
3603 		adev->mode_info.num_hpd = 6;
3604 		adev->mode_info.num_dig = 6;
3605 		break;
3606 	case CHIP_KAVERI:
3607 		adev->mode_info.num_crtc = 4;
3608 		adev->mode_info.num_hpd = 6;
3609 		adev->mode_info.num_dig = 7;
3610 		break;
3611 	case CHIP_KABINI:
3612 	case CHIP_MULLINS:
3613 		adev->mode_info.num_crtc = 2;
3614 		adev->mode_info.num_hpd = 6;
3615 		adev->mode_info.num_dig = 6;
3616 		break;
3617 	case CHIP_FIJI:
3618 	case CHIP_TONGA:
3619 		adev->mode_info.num_crtc = 6;
3620 		adev->mode_info.num_hpd = 6;
3621 		adev->mode_info.num_dig = 7;
3622 		break;
3623 	case CHIP_CARRIZO:
3624 		adev->mode_info.num_crtc = 3;
3625 		adev->mode_info.num_hpd = 6;
3626 		adev->mode_info.num_dig = 9;
3627 		break;
3628 	case CHIP_STONEY:
3629 		adev->mode_info.num_crtc = 2;
3630 		adev->mode_info.num_hpd = 6;
3631 		adev->mode_info.num_dig = 9;
3632 		break;
3633 	case CHIP_POLARIS11:
3634 	case CHIP_POLARIS12:
3635 		adev->mode_info.num_crtc = 5;
3636 		adev->mode_info.num_hpd = 5;
3637 		adev->mode_info.num_dig = 5;
3638 		break;
3639 	case CHIP_POLARIS10:
3640 	case CHIP_VEGAM:
3641 		adev->mode_info.num_crtc = 6;
3642 		adev->mode_info.num_hpd = 6;
3643 		adev->mode_info.num_dig = 6;
3644 		break;
3645 	case CHIP_VEGA10:
3646 	case CHIP_VEGA12:
3647 	case CHIP_VEGA20:
3648 		adev->mode_info.num_crtc = 6;
3649 		adev->mode_info.num_hpd = 6;
3650 		adev->mode_info.num_dig = 6;
3651 		break;
3652 #if defined(CONFIG_DRM_AMD_DC_DCN)
3653 	case CHIP_RAVEN:
3654 	case CHIP_RENOIR:
3655 	case CHIP_VANGOGH:
3656 		adev->mode_info.num_crtc = 4;
3657 		adev->mode_info.num_hpd = 4;
3658 		adev->mode_info.num_dig = 4;
3659 		break;
3660 	case CHIP_NAVI10:
3661 	case CHIP_NAVI12:
3662 	case CHIP_SIENNA_CICHLID:
3663 	case CHIP_NAVY_FLOUNDER:
3664 		adev->mode_info.num_crtc = 6;
3665 		adev->mode_info.num_hpd = 6;
3666 		adev->mode_info.num_dig = 6;
3667 		break;
3668 	case CHIP_NAVI14:
3669 	case CHIP_DIMGREY_CAVEFISH:
3670 		adev->mode_info.num_crtc = 5;
3671 		adev->mode_info.num_hpd = 5;
3672 		adev->mode_info.num_dig = 5;
3673 		break;
3674 #endif
3675 	default:
3676 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3677 		return -EINVAL;
3678 	}
3679 
3680 	amdgpu_dm_set_irq_funcs(adev);
3681 
3682 	if (adev->mode_info.funcs == NULL)
3683 		adev->mode_info.funcs = &dm_display_funcs;
3684 
3685 	/*
3686 	 * Note: Do NOT change adev->audio_endpt_rreg and
3687 	 * adev->audio_endpt_wreg because they are initialised in
3688 	 * amdgpu_device_init()
3689 	 */
3690 #if defined(CONFIG_DEBUG_KERNEL_DC)
3691 	device_create_file(
3692 		adev_to_drm(adev)->dev,
3693 		&dev_attr_s3_debug);
3694 #endif
3695 
3696 	return 0;
3697 }
3698 
3699 static bool modeset_required(struct drm_crtc_state *crtc_state,
3700 			     struct dc_stream_state *new_stream,
3701 			     struct dc_stream_state *old_stream)
3702 {
3703 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3704 }
3705 
3706 static bool modereset_required(struct drm_crtc_state *crtc_state)
3707 {
3708 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3709 }
3710 
3711 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3712 {
3713 	drm_encoder_cleanup(encoder);
3714 	kfree(encoder);
3715 }
3716 
3717 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3718 	.destroy = amdgpu_dm_encoder_destroy,
3719 };
3720 
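/*
 * fill_dc_scaling_info - build DC scaling info from a DRM plane state
 *
 * The DRM source rectangle is 16.16 fixed point; only the integer part is
 * used here. Scaling ratios are checked in thousandths, so the accepted
 * range of 250..16000 corresponds to 0.25x downscale through 16x upscale.
 * For example, src_w = 960 with crtc_w = 1920 gives scale_w = 2000, i.e.
 * a 2x upscale, which passes.
 *
 * Returns 0 on success, -EINVAL for degenerate or out-of-range rectangles.
 */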
3722 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3723 				struct dc_scaling_info *scaling_info)
3724 {
3725 	int scale_w, scale_h;
3726 
3727 	memset(scaling_info, 0, sizeof(*scaling_info));
3728 
3729 	/* Source is 16.16 fixed point; the fractional bits are ignored for now. */
3730 	scaling_info->src_rect.x = state->src_x >> 16;
3731 	scaling_info->src_rect.y = state->src_y >> 16;
3732 
3733 	scaling_info->src_rect.width = state->src_w >> 16;
3734 	if (scaling_info->src_rect.width == 0)
3735 		return -EINVAL;
3736 
3737 	scaling_info->src_rect.height = state->src_h >> 16;
3738 	if (scaling_info->src_rect.height == 0)
3739 		return -EINVAL;
3740 
3741 	scaling_info->dst_rect.x = state->crtc_x;
3742 	scaling_info->dst_rect.y = state->crtc_y;
3743 
3744 	if (state->crtc_w == 0)
3745 		return -EINVAL;
3746 
3747 	scaling_info->dst_rect.width = state->crtc_w;
3748 
3749 	if (state->crtc_h == 0)
3750 		return -EINVAL;
3751 
3752 	scaling_info->dst_rect.height = state->crtc_h;
3753 
3754 	/* DRM doesn't specify clipping on destination output. */
3755 	scaling_info->clip_rect = scaling_info->dst_rect;
3756 
3757 	/* TODO: Validate scaling per-format with DC plane caps */
3758 	scale_w = scaling_info->dst_rect.width * 1000 /
3759 		  scaling_info->src_rect.width;
3760 
3761 	if (scale_w < 250 || scale_w > 16000)
3762 		return -EINVAL;
3763 
3764 	scale_h = scaling_info->dst_rect.height * 1000 /
3765 		  scaling_info->src_rect.height;
3766 
3767 	if (scale_h < 250 || scale_h > 16000)
3768 		return -EINVAL;
3769 
3770 	/*
3771 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3772 	 * assume reasonable defaults based on the format.
3773 	 */
3774 
3775 	return 0;
3776 }
3777 
3778 static void
3779 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3780 				 uint64_t tiling_flags)
3781 {
3782 	/* Fill GFX8 params */
3783 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3784 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3785 
3786 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3787 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3788 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3789 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3790 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3791 
3792 		/* XXX fix me for VI */
3793 		tiling_info->gfx8.num_banks = num_banks;
3794 		tiling_info->gfx8.array_mode =
3795 				DC_ARRAY_2D_TILED_THIN1;
3796 		tiling_info->gfx8.tile_split = tile_split;
3797 		tiling_info->gfx8.bank_width = bankw;
3798 		tiling_info->gfx8.bank_height = bankh;
3799 		tiling_info->gfx8.tile_aspect = mtaspect;
3800 		tiling_info->gfx8.tile_mode =
3801 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3802 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3803 			== DC_ARRAY_1D_TILED_THIN1) {
3804 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3805 	}
3806 
3807 	tiling_info->gfx8.pipe_config =
3808 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3809 }
3810 
3811 static void
3812 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3813 				  union dc_tiling_info *tiling_info)
3814 {
3815 	tiling_info->gfx9.num_pipes =
3816 		adev->gfx.config.gb_addr_config_fields.num_pipes;
3817 	tiling_info->gfx9.num_banks =
3818 		adev->gfx.config.gb_addr_config_fields.num_banks;
3819 	tiling_info->gfx9.pipe_interleave =
3820 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3821 	tiling_info->gfx9.num_shader_engines =
3822 		adev->gfx.config.gb_addr_config_fields.num_se;
3823 	tiling_info->gfx9.max_compressed_frags =
3824 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3825 	tiling_info->gfx9.num_rb_per_se =
3826 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3827 	tiling_info->gfx9.shaderEnable = 1;
3828 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3829 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
3830 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3831 	    adev->asic_type == CHIP_VANGOGH)
3832 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3833 }
3834 
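/*
 * validate_dcc - check requested DCC parameters against DC capabilities
 *
 * Queries DC's get_dcc_compression_cap hook with the surface format, size
 * and swizzle mode to decide whether the DCC configuration encoded in the
 * framebuffer can be scanned out. Disabled DCC always validates; video
 * formats, incapable surfaces and an independent_64b_blks mismatch are
 * rejected with -EINVAL.
 */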
3835 static int
3836 validate_dcc(struct amdgpu_device *adev,
3837 	     const enum surface_pixel_format format,
3838 	     const enum dc_rotation_angle rotation,
3839 	     const union dc_tiling_info *tiling_info,
3840 	     const struct dc_plane_dcc_param *dcc,
3841 	     const struct dc_plane_address *address,
3842 	     const struct plane_size *plane_size)
3843 {
3844 	struct dc *dc = adev->dm.dc;
3845 	struct dc_dcc_surface_param input;
3846 	struct dc_surface_dcc_cap output;
3847 
3848 	memset(&input, 0, sizeof(input));
3849 	memset(&output, 0, sizeof(output));
3850 
3851 	if (!dcc->enable)
3852 		return 0;
3853 
3854 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3855 	    !dc->cap_funcs.get_dcc_compression_cap)
3856 		return -EINVAL;
3857 
3858 	input.format = format;
3859 	input.surface_size.width = plane_size->surface_size.width;
3860 	input.surface_size.height = plane_size->surface_size.height;
3861 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3862 
3863 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3864 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3865 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3866 		input.scan = SCAN_DIRECTION_VERTICAL;
3867 
3868 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3869 		return -EINVAL;
3870 
3871 	if (!output.capable)
3872 		return -EINVAL;
3873 
3874 	if (dcc->independent_64b_blks == 0 &&
3875 	    output.grph.rgb.independent_64b_blks != 0)
3876 		return -EINVAL;
3877 
3878 	return 0;
3879 }
3880 
3881 static bool
3882 modifier_has_dcc(uint64_t modifier)
3883 {
3884 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3885 }
3886 
3887 static unsigned
3888 modifier_gfx9_swizzle_mode(uint64_t modifier)
3889 {
3890 	if (modifier == DRM_FORMAT_MOD_LINEAR)
3891 		return 0;
3892 
3893 	return AMD_FMT_MOD_GET(TILE, modifier);
3894 }
3895 
3896 static const struct drm_format_info *
3897 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3898 {
3899 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3900 }
3901 
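/*
 * Derive GFX9+ tiling parameters from a DRM format modifier. The device
 * defaults are filled in first and then, for AMD modifiers, overridden by
 * the XOR bits encoded in the modifier: pipes (capped at 16, with the
 * remaining pipe XOR bits attributed to shader engines), plus packers on
 * GFX10+ or banks on older families.
 */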
3902 static void
3903 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3904 				    union dc_tiling_info *tiling_info,
3905 				    uint64_t modifier)
3906 {
3907 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3908 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3909 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3910 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3911 
3912 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
3913 
3914 	if (!IS_AMD_FMT_MOD(modifier))
3915 		return;
3916 
3917 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3918 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3919 
3920 	if (adev->family >= AMDGPU_FAMILY_NV) {
3921 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3922 	} else {
3923 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3924 
3925 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3926 	}
3927 }
3928 
3929 enum dm_micro_swizzle {
3930 	MICRO_SWIZZLE_Z = 0,
3931 	MICRO_SWIZZLE_S = 1,
3932 	MICRO_SWIZZLE_D = 2,
3933 	MICRO_SWIZZLE_R = 3
3934 };
3935 
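/*
 * dm_plane_format_mod_supported - filter modifiers per pixel format
 *
 * Called by DRM core to trim the advertised modifier list for a given
 * format. LINEAR always passes because core DRM falls back to it when
 * userspace provides no modifiers. Multi-plane formats, D swizzles at
 * unsupported bit depths, and DCC on anything other than 32bpp are
 * rejected.
 */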
3936 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3937 					  uint32_t format,
3938 					  uint64_t modifier)
3939 {
3940 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
3941 	const struct drm_format_info *info = drm_format_info(format);
3942 
3943 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3944 
3945 	if (!info)
3946 		return false;
3947 
3948 	/*
3949 	 * We always have to allow this modifier, because core DRM still
3950 	 * checks LINEAR support if userspace does not provide modifiers.
3951 	 */
3952 	if (modifier == DRM_FORMAT_MOD_LINEAR)
3953 		return true;
3954 
3955 	/*
3956 	 * Arbitrary tiling support for multi-plane formats has not been
3957 	 * hooked up yet.
3958 	 */
3959 	if (info->num_planes > 1)
3960 		return false;
3961 
3962 	/*
3963 	 * For D swizzle the canonical modifier depends on the bpp, so check
3964 	 * it here.
3965 	 */
3966 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
3967 	    adev->family >= AMDGPU_FAMILY_NV) {
3968 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
3969 			return false;
3970 	}
3971 
3972 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
3973 	    info->cpp[0] < 8)
3974 		return false;
3975 
3976 	if (modifier_has_dcc(modifier)) {
3977 		/* Per radeonsi comments 16/64 bpp are more complicated. */
3978 		if (info->cpp[0] != 4)
3979 			return false;
3980 	}
3981 
3982 	return true;
3983 }
3984 
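/*
 * Append one modifier to a dynamically sized list, doubling the backing
 * allocation whenever it fills up. On allocation failure the list is
 * freed and *mods set to NULL, which turns every later call into a no-op
 * and lets the caller detect the failure with a single NULL check at the
 * end.
 */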
3985 static void
3986 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
3987 {
3988 	if (!*mods)
3989 		return;
3990 
3991 	if (*cap - *size < 1) {
3992 		uint64_t new_cap = *cap * 2;
3993 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
3994 
3995 		if (!new_mods) {
3996 			kfree(*mods);
3997 			*mods = NULL;
3998 			return;
3999 		}
4000 
4001 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4002 		kfree(*mods);
4003 		*mods = new_mods;
4004 		*cap = new_cap;
4005 	}
4006 
4007 	(*mods)[*size] = mod;
4008 	*size += 1;
4009 }
4010 
4011 static void
4012 add_gfx9_modifiers(const struct amdgpu_device *adev,
4013 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4014 {
4015 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4016 	int pipe_xor_bits = min(8, pipes +
4017 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4018 	int bank_xor_bits = min(8 - pipe_xor_bits,
4019 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4020 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4021 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4022 
4024 	if (adev->family == AMDGPU_FAMILY_RV) {
4025 		/* Raven2 and later */
4026 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4027 
4028 		/*
4029 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4030 		 * doesn't support _D on DCN
4031 		 */
4032 
4033 		if (has_constant_encode) {
4034 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4035 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4036 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4037 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4038 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4039 				    AMD_FMT_MOD_SET(DCC, 1) |
4040 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4041 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4042 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4043 		}
4044 
4045 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4046 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4047 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4048 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4049 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4050 			    AMD_FMT_MOD_SET(DCC, 1) |
4051 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4052 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4053 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4054 
4055 		if (has_constant_encode) {
4056 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4057 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4058 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4059 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4060 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4061 				    AMD_FMT_MOD_SET(DCC, 1) |
4062 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4063 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4064 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4066 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4067 				    AMD_FMT_MOD_SET(RB, rb) |
4068 				    AMD_FMT_MOD_SET(PIPE, pipes));
4069 		}
4070 
4071 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4072 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4073 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4074 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4075 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4076 			    AMD_FMT_MOD_SET(DCC, 1) |
4077 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4078 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4079 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4080 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4081 			    AMD_FMT_MOD_SET(RB, rb) |
4082 			    AMD_FMT_MOD_SET(PIPE, pipes));
4083 	}
4084 
4085 	/*
4086 	 * Only supported for 64bpp on Raven, will be filtered on format in
4087 	 * dm_plane_format_mod_supported.
4088 	 */
4089 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4090 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4091 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4092 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4093 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4094 
4095 	if (adev->family == AMDGPU_FAMILY_RV) {
4096 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4097 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4098 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4099 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4100 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4101 	}
4102 
4103 	/*
4104 	 * Only supported for 64bpp on Raven, will be filtered on format in
4105 	 * dm_plane_format_mod_supported.
4106 	 */
4107 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4108 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4109 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4110 
4111 	if (adev->family == AMDGPU_FAMILY_RV) {
4112 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4113 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4114 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4115 	}
4116 }
4117 
4118 static void
4119 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4120 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4121 {
4122 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4123 
4124 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4125 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4126 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4127 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4128 		    AMD_FMT_MOD_SET(DCC, 1) |
4129 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4130 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4131 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4132 
4133 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4134 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4135 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4136 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4137 		    AMD_FMT_MOD_SET(DCC, 1) |
4138 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4139 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4140 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4141 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4142 
4143 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4144 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4145 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4146 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4147 
4148 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4149 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4150 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4151 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4152 
4154 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4155 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4156 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4157 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4158 
4159 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4160 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4161 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4162 }
4163 
4164 static void
4165 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4166 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4167 {
4168 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4169 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4170 
4171 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4172 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4173 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4174 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4175 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4176 		    AMD_FMT_MOD_SET(DCC, 1) |
4177 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4178 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4179 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4180 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4181 
4182 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4183 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4184 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4185 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4186 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4187 		    AMD_FMT_MOD_SET(DCC, 1) |
4188 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4189 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4190 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4191 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4192 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4193 
4194 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4195 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4196 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4197 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4198 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4199 
4200 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4201 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4202 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4203 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4204 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4205 
4206 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4207 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4208 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4209 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4210 
4211 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4212 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4213 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4214 }
4215 
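/*
 * Build the modifier list to advertise for a plane. Cursor planes only
 * get LINEAR; other plane types are populated by the per-generation
 * helpers above, with LINEAR appended as a universal fallback. The list
 * is terminated with DRM_FORMAT_MOD_INVALID, as DRM requires.
 */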
4216 static int
4217 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4218 {
4219 	uint64_t size = 0, capacity = 128;
4220 	*mods = NULL;
4221 
4222 	/* We have not hooked up any pre-GFX9 modifiers. */
4223 	if (adev->family < AMDGPU_FAMILY_AI)
4224 		return 0;
4225 
4226 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4227 
4228 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4229 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4230 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4231 		return *mods ? 0 : -ENOMEM;
4232 	}
4233 
4234 	switch (adev->family) {
4235 	case AMDGPU_FAMILY_AI:
4236 	case AMDGPU_FAMILY_RV:
4237 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4238 		break;
4239 	case AMDGPU_FAMILY_NV:
4240 	case AMDGPU_FAMILY_VGH:
4241 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4242 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4243 		else
4244 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4245 		break;
4246 	}
4247 
4248 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4249 
4250 	/* INVALID marks the end of the list. */
4251 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4252 
4253 	if (!*mods)
4254 		return -ENOMEM;
4255 
4256 	return 0;
4257 }
4258 
4259 static int
4260 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4261 					  const struct amdgpu_framebuffer *afb,
4262 					  const enum surface_pixel_format format,
4263 					  const enum dc_rotation_angle rotation,
4264 					  const struct plane_size *plane_size,
4265 					  union dc_tiling_info *tiling_info,
4266 					  struct dc_plane_dcc_param *dcc,
4267 					  struct dc_plane_address *address,
4268 					  const bool force_disable_dcc)
4269 {
4270 	const uint64_t modifier = afb->base.modifier;
4271 	int ret;
4272 
4273 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4274 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4275 
4276 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4277 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4278 
4279 		dcc->enable = 1;
4280 		dcc->meta_pitch = afb->base.pitches[1];
4281 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4282 
4283 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4284 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4285 	}
4286 
4287 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4288 	if (ret)
4289 		return ret;
4290 
4291 	return 0;
4292 }
4293 
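/*
 * Translate the framebuffer memory layout into DC's plane structures:
 * surface size, pitch in pixels and the GPU addresses to scan out from.
 * RGB surfaces use a single graphics address, while video formats carry
 * separate luma/chroma addresses taken from offsets[0] and offsets[1].
 * GFX9 and newer derive tiling and DCC from the format modifier; older
 * ASICs fall back to the legacy GFX8 tiling flags.
 */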
4294 static int
4295 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4296 			     const struct amdgpu_framebuffer *afb,
4297 			     const enum surface_pixel_format format,
4298 			     const enum dc_rotation_angle rotation,
4299 			     const uint64_t tiling_flags,
4300 			     union dc_tiling_info *tiling_info,
4301 			     struct plane_size *plane_size,
4302 			     struct dc_plane_dcc_param *dcc,
4303 			     struct dc_plane_address *address,
4304 			     bool tmz_surface,
4305 			     bool force_disable_dcc)
4306 {
4307 	const struct drm_framebuffer *fb = &afb->base;
4308 	int ret;
4309 
4310 	memset(tiling_info, 0, sizeof(*tiling_info));
4311 	memset(plane_size, 0, sizeof(*plane_size));
4312 	memset(dcc, 0, sizeof(*dcc));
4313 	memset(address, 0, sizeof(*address));
4314 
4315 	address->tmz_surface = tmz_surface;
4316 
4317 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4318 		uint64_t addr = afb->address + fb->offsets[0];
4319 
4320 		plane_size->surface_size.x = 0;
4321 		plane_size->surface_size.y = 0;
4322 		plane_size->surface_size.width = fb->width;
4323 		plane_size->surface_size.height = fb->height;
4324 		plane_size->surface_pitch =
4325 			fb->pitches[0] / fb->format->cpp[0];
4326 
4327 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4328 		address->grph.addr.low_part = lower_32_bits(addr);
4329 		address->grph.addr.high_part = upper_32_bits(addr);
4330 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4331 		uint64_t luma_addr = afb->address + fb->offsets[0];
4332 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4333 
4334 		plane_size->surface_size.x = 0;
4335 		plane_size->surface_size.y = 0;
4336 		plane_size->surface_size.width = fb->width;
4337 		plane_size->surface_size.height = fb->height;
4338 		plane_size->surface_pitch =
4339 			fb->pitches[0] / fb->format->cpp[0];
4340 
4341 		plane_size->chroma_size.x = 0;
4342 		plane_size->chroma_size.y = 0;
4343 		/* TODO: set these based on surface format */
4344 		plane_size->chroma_size.width = fb->width / 2;
4345 		plane_size->chroma_size.height = fb->height / 2;
4346 
4347 		plane_size->chroma_pitch =
4348 			fb->pitches[1] / fb->format->cpp[1];
4349 
4350 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4351 		address->video_progressive.luma_addr.low_part =
4352 			lower_32_bits(luma_addr);
4353 		address->video_progressive.luma_addr.high_part =
4354 			upper_32_bits(luma_addr);
4355 		address->video_progressive.chroma_addr.low_part =
4356 			lower_32_bits(chroma_addr);
4357 		address->video_progressive.chroma_addr.high_part =
4358 			upper_32_bits(chroma_addr);
4359 	}
4360 
4361 	if (adev->family >= AMDGPU_FAMILY_AI) {
4362 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4363 								rotation, plane_size,
4364 								tiling_info, dcc,
4365 								address,
4366 								force_disable_dcc);
4367 		if (ret)
4368 			return ret;
4369 	} else {
4370 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4371 	}
4372 
4373 	return 0;
4374 }
4375 
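/*
 * Derive DC blending settings from a DRM plane state. Blending is only
 * programmed for overlay planes: per-pixel alpha requires premultiplied
 * blending and an alpha-capable format, and the 16-bit DRM plane alpha
 * property is reduced to the 8-bit global alpha DC expects by dropping
 * the low byte (0xffff therefore means fully opaque and leaves global
 * alpha disabled).
 */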
4376 static void
4377 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4378 			       bool *per_pixel_alpha, bool *global_alpha,
4379 			       int *global_alpha_value)
4380 {
4381 	*per_pixel_alpha = false;
4382 	*global_alpha = false;
4383 	*global_alpha_value = 0xff;
4384 
4385 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4386 		return;
4387 
4388 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4389 		static const uint32_t alpha_formats[] = {
4390 			DRM_FORMAT_ARGB8888,
4391 			DRM_FORMAT_RGBA8888,
4392 			DRM_FORMAT_ABGR8888,
4393 		};
4394 		uint32_t format = plane_state->fb->format->format;
4395 		unsigned int i;
4396 
4397 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4398 			if (format == alpha_formats[i]) {
4399 				*per_pixel_alpha = true;
4400 				break;
4401 			}
4402 		}
4403 	}
4404 
4405 	if (plane_state->alpha < 0xffff) {
4406 		*global_alpha = true;
4407 		*global_alpha_value = plane_state->alpha >> 8;
4408 	}
4409 }
4410 
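/*
 * Map the DRM color encoding and range properties onto a DC color space.
 * RGB surfaces always use sRGB; YCbCr surfaces select BT.601/709/2020 in
 * a full- or limited-range variant, except that limited-range BT.2020 is
 * not supported and returns -EINVAL.
 */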
4411 static int
4412 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4413 			    const enum surface_pixel_format format,
4414 			    enum dc_color_space *color_space)
4415 {
4416 	bool full_range;
4417 
4418 	*color_space = COLOR_SPACE_SRGB;
4419 
4420 	/* DRM color properties only affect non-RGB formats. */
4421 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4422 		return 0;
4423 
4424 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4425 
4426 	switch (plane_state->color_encoding) {
4427 	case DRM_COLOR_YCBCR_BT601:
4428 		if (full_range)
4429 			*color_space = COLOR_SPACE_YCBCR601;
4430 		else
4431 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4432 		break;
4433 
4434 	case DRM_COLOR_YCBCR_BT709:
4435 		if (full_range)
4436 			*color_space = COLOR_SPACE_YCBCR709;
4437 		else
4438 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4439 		break;
4440 
4441 	case DRM_COLOR_YCBCR_BT2020:
4442 		if (full_range)
4443 			*color_space = COLOR_SPACE_2020_YCBCR;
4444 		else
4445 			return -EINVAL;
4446 		break;
4447 
4448 	default:
4449 		return -EINVAL;
4450 	}
4451 
4452 	return 0;
4453 }
4454 
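/*
 * Central DRM-to-DC plane translation: map the fourcc onto a DC surface
 * format, convert the rotation, then fill in the color space, buffer
 * (tiling/DCC/address) and blending attributes through the helpers
 * above. Unsupported pixel formats fail with -EINVAL.
 */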
4455 static int
4456 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4457 			    const struct drm_plane_state *plane_state,
4458 			    const uint64_t tiling_flags,
4459 			    struct dc_plane_info *plane_info,
4460 			    struct dc_plane_address *address,
4461 			    bool tmz_surface,
4462 			    bool force_disable_dcc)
4463 {
4464 	const struct drm_framebuffer *fb = plane_state->fb;
4465 	const struct amdgpu_framebuffer *afb =
4466 		to_amdgpu_framebuffer(plane_state->fb);
4467 	struct drm_format_name_buf format_name;
4468 	int ret;
4469 
4470 	memset(plane_info, 0, sizeof(*plane_info));
4471 
4472 	switch (fb->format->format) {
4473 	case DRM_FORMAT_C8:
4474 		plane_info->format =
4475 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4476 		break;
4477 	case DRM_FORMAT_RGB565:
4478 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4479 		break;
4480 	case DRM_FORMAT_XRGB8888:
4481 	case DRM_FORMAT_ARGB8888:
4482 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4483 		break;
4484 	case DRM_FORMAT_XRGB2101010:
4485 	case DRM_FORMAT_ARGB2101010:
4486 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4487 		break;
4488 	case DRM_FORMAT_XBGR2101010:
4489 	case DRM_FORMAT_ABGR2101010:
4490 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4491 		break;
4492 	case DRM_FORMAT_XBGR8888:
4493 	case DRM_FORMAT_ABGR8888:
4494 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4495 		break;
4496 	case DRM_FORMAT_NV21:
4497 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4498 		break;
4499 	case DRM_FORMAT_NV12:
4500 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4501 		break;
4502 	case DRM_FORMAT_P010:
4503 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4504 		break;
4505 	case DRM_FORMAT_XRGB16161616F:
4506 	case DRM_FORMAT_ARGB16161616F:
4507 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4508 		break;
4509 	case DRM_FORMAT_XBGR16161616F:
4510 	case DRM_FORMAT_ABGR16161616F:
4511 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4512 		break;
4513 	default:
4514 		DRM_ERROR(
4515 			"Unsupported screen format %s\n",
4516 			drm_get_format_name(fb->format->format, &format_name));
4517 		return -EINVAL;
4518 	}
4519 
4520 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4521 	case DRM_MODE_ROTATE_0:
4522 		plane_info->rotation = ROTATION_ANGLE_0;
4523 		break;
4524 	case DRM_MODE_ROTATE_90:
4525 		plane_info->rotation = ROTATION_ANGLE_90;
4526 		break;
4527 	case DRM_MODE_ROTATE_180:
4528 		plane_info->rotation = ROTATION_ANGLE_180;
4529 		break;
4530 	case DRM_MODE_ROTATE_270:
4531 		plane_info->rotation = ROTATION_ANGLE_270;
4532 		break;
4533 	default:
4534 		plane_info->rotation = ROTATION_ANGLE_0;
4535 		break;
4536 	}
4537 
4538 	plane_info->visible = true;
4539 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4540 
4541 	plane_info->layer_index = 0;
4542 
4543 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4544 					  &plane_info->color_space);
4545 	if (ret)
4546 		return ret;
4547 
4548 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4549 					   plane_info->rotation, tiling_flags,
4550 					   &plane_info->tiling_info,
4551 					   &plane_info->plane_size,
4552 					   &plane_info->dcc, address, tmz_surface,
4553 					   force_disable_dcc);
4554 	if (ret)
4555 		return ret;
4556 
4557 	fill_blending_from_plane_state(
4558 		plane_state, &plane_info->per_pixel_alpha,
4559 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4560 
4561 	return 0;
4562 }
4563 
4564 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4565 				    struct dc_plane_state *dc_plane_state,
4566 				    struct drm_plane_state *plane_state,
4567 				    struct drm_crtc_state *crtc_state)
4568 {
4569 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4570 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4571 	struct dc_scaling_info scaling_info;
4572 	struct dc_plane_info plane_info;
4573 	int ret;
4574 	bool force_disable_dcc = false;
4575 
4576 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4577 	if (ret)
4578 		return ret;
4579 
4580 	dc_plane_state->src_rect = scaling_info.src_rect;
4581 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4582 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4583 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4584 
4585 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4586 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4587 					  afb->tiling_flags,
4588 					  &plane_info,
4589 					  &dc_plane_state->address,
4590 					  afb->tmz_surface,
4591 					  force_disable_dcc);
4592 	if (ret)
4593 		return ret;
4594 
4595 	dc_plane_state->format = plane_info.format;
4596 	dc_plane_state->color_space = plane_info.color_space;
4598 	dc_plane_state->plane_size = plane_info.plane_size;
4599 	dc_plane_state->rotation = plane_info.rotation;
4600 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4601 	dc_plane_state->stereo_format = plane_info.stereo_format;
4602 	dc_plane_state->tiling_info = plane_info.tiling_info;
4603 	dc_plane_state->visible = plane_info.visible;
4604 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4605 	dc_plane_state->global_alpha = plane_info.global_alpha;
4606 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4607 	dc_plane_state->dcc = plane_info.dcc;
4608 	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4609 
4610 	/*
4611 	 * Always set input transfer function, since plane state is refreshed
4612 	 * every time.
4613 	 */
4614 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4615 	if (ret)
4616 		return ret;
4617 
4618 	return 0;
4619 }
4620 
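/*
 * Compute the stream source (viewport) and destination rectangles from
 * the connector's scaling property. The aspect-preserving path letter-
 * or pillarboxes the mode into the addressable area, RMX_CENTER shows it
 * unscaled in the middle of the screen, and underscan shrinks the
 * destination by the requested borders. For example, a 1280x720 source
 * on a 1920x1200 timing with RMX_ASPECT yields a 1920x1080 destination
 * offset by (0, 60).
 */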
4621 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4622 					   const struct dm_connector_state *dm_state,
4623 					   struct dc_stream_state *stream)
4624 {
4625 	enum amdgpu_rmx_type rmx_type;
4626 
4627 	struct rect src = { 0 }; /* viewport in composition space */
4628 	struct rect dst = { 0 }; /* stream addressable area */
4629 
4630 	/* no mode. nothing to be done */
4631 	if (!mode)
4632 		return;
4633 
4634 	/* Full screen scaling by default */
4635 	src.width = mode->hdisplay;
4636 	src.height = mode->vdisplay;
4637 	dst.width = stream->timing.h_addressable;
4638 	dst.height = stream->timing.v_addressable;
4639 
4640 	if (dm_state) {
4641 		rmx_type = dm_state->scaling;
4642 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4643 			if (src.width * dst.height <
4644 					src.height * dst.width) {
4645 				/* height needs less upscaling/more downscaling */
4646 				dst.width = src.width *
4647 						dst.height / src.height;
4648 			} else {
4649 				/* width needs less upscaling/more downscaling */
4650 				dst.height = src.height *
4651 						dst.width / src.width;
4652 			}
4653 		} else if (rmx_type == RMX_CENTER) {
4654 			dst = src;
4655 		}
4656 
4657 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4658 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4659 
4660 		if (dm_state->underscan_enable) {
4661 			dst.x += dm_state->underscan_hborder / 2;
4662 			dst.y += dm_state->underscan_vborder / 2;
4663 			dst.width -= dm_state->underscan_hborder;
4664 			dst.height -= dm_state->underscan_vborder;
4665 		}
4666 	}
4667 
4668 	stream->src = src;
4669 	stream->dst = dst;
4670 
4671 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4672 			dst.x, dst.y, dst.width, dst.height);
4674 }
4675 
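/*
 * Pick a stream color depth from the sink's EDID-reported capabilities.
 * YCbCr 4:2:0 modes consult the HDMI HF-VSDB deep color flags; anything
 * else uses display_info.bpc, assuming 8 bpc when the EDID is silent.
 * A non-zero requested_bpc (from the max_bpc property) caps the result,
 * rounded down to an even value.
 */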
4676 static enum dc_color_depth
4677 convert_color_depth_from_display_info(const struct drm_connector *connector,
4678 				      bool is_y420, int requested_bpc)
4679 {
4680 	uint8_t bpc;
4681 
4682 	if (is_y420) {
4683 		bpc = 8;
4684 
4685 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4686 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4687 			bpc = 16;
4688 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4689 			bpc = 12;
4690 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4691 			bpc = 10;
4692 	} else {
4693 		bpc = (uint8_t)connector->display_info.bpc;
4694 		/* Assume 8 bpc by default if no bpc is specified. */
4695 		bpc = bpc ? bpc : 8;
4696 	}
4697 
4698 	if (requested_bpc > 0) {
4699 		/*
4700 		 * Cap display bpc based on the user requested value.
4701 		 *
4702 		 * The value for state->max_bpc may not be correctly updated
4703 		 * depending on when the connector gets added to the state
4704 		 * or if this was called outside of atomic check, so it
4705 		 * can't be used directly.
4706 		 */
4707 		bpc = min_t(u8, bpc, requested_bpc);
4708 
4709 		/* Round down to the nearest even number. */
4710 		bpc = bpc - (bpc & 1);
4711 	}
4712 
4713 	switch (bpc) {
4714 	case 0:
4715 		/*
4716 		 * Temporary workaround: DRM doesn't parse color depth for
4717 		 * EDID revisions before 1.4.
4718 		 * TODO: Fix EDID parsing.
4719 		 */
4720 		return COLOR_DEPTH_888;
4721 	case 6:
4722 		return COLOR_DEPTH_666;
4723 	case 8:
4724 		return COLOR_DEPTH_888;
4725 	case 10:
4726 		return COLOR_DEPTH_101010;
4727 	case 12:
4728 		return COLOR_DEPTH_121212;
4729 	case 14:
4730 		return COLOR_DEPTH_141414;
4731 	case 16:
4732 		return COLOR_DEPTH_161616;
4733 	default:
4734 		return COLOR_DEPTH_UNDEFINED;
4735 	}
4736 }
4737 
4738 static enum dc_aspect_ratio
4739 get_aspect_ratio(const struct drm_display_mode *mode_in)
4740 {
4741 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4742 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4743 }
4744 
4745 static enum dc_color_space
4746 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4747 {
4748 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4749 
4750 	switch (dc_crtc_timing->pixel_encoding)	{
4751 	case PIXEL_ENCODING_YCBCR422:
4752 	case PIXEL_ENCODING_YCBCR444:
4753 	case PIXEL_ENCODING_YCBCR420:
4754 	{
4755 		/*
4756 		 * Per the HDMI spec, 27030 kHz is the separation point
4757 		 * between HDTV and SDTV; we use YCbCr709 above it and
4758 		 * YCbCr601 below it.
4759 		 */
4760 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4761 			if (dc_crtc_timing->flags.Y_ONLY)
4762 				color_space =
4763 					COLOR_SPACE_YCBCR709_LIMITED;
4764 			else
4765 				color_space = COLOR_SPACE_YCBCR709;
4766 		} else {
4767 			if (dc_crtc_timing->flags.Y_ONLY)
4768 				color_space =
4769 					COLOR_SPACE_YCBCR601_LIMITED;
4770 			else
4771 				color_space = COLOR_SPACE_YCBCR601;
4772 		}
4773 
4774 	}
4775 	break;
4776 	case PIXEL_ENCODING_RGB:
4777 		color_space = COLOR_SPACE_SRGB;
4778 		break;
4779 
4780 	default:
4781 		WARN_ON(1);
4782 		break;
4783 	}
4784 
4785 	return color_space;
4786 }
4787 
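/*
 * Walk the color depth down until the normalized pixel clock fits within
 * the sink's max TMDS clock (in kHz). YCbCr 4:2:0 halves the clock and
 * deep color scales it by 30/24, 36/24 or 48/24 for 10, 12 and 16 bpc.
 * For example, a 600000 kHz clock at 10 bpc normalizes to 750000 kHz;
 * against a 600000 kHz TMDS limit the loop falls back to 8 bpc. Returns
 * false if no HDMI-valid depth fits.
 */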
4788 static bool adjust_colour_depth_from_display_info(
4789 	struct dc_crtc_timing *timing_out,
4790 	const struct drm_display_info *info)
4791 {
4792 	enum dc_color_depth depth = timing_out->display_color_depth;
4793 	int normalized_clk;

4794 	do {
4795 		normalized_clk = timing_out->pix_clk_100hz / 10;
4796 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4797 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4798 			normalized_clk /= 2;
4799 		/* Adjust the pixel clock per the HDMI spec for the given colour depth. */
4800 		switch (depth) {
4801 		case COLOR_DEPTH_888:
4802 			break;
4803 		case COLOR_DEPTH_101010:
4804 			normalized_clk = (normalized_clk * 30) / 24;
4805 			break;
4806 		case COLOR_DEPTH_121212:
4807 			normalized_clk = (normalized_clk * 36) / 24;
4808 			break;
4809 		case COLOR_DEPTH_161616:
4810 			normalized_clk = (normalized_clk * 48) / 24;
4811 			break;
4812 		default:
4813 			/* The above depths are the only ones valid for HDMI. */
4814 			return false;
4815 		}
4816 		if (normalized_clk <= info->max_tmds_clock) {
4817 			timing_out->display_color_depth = depth;
4818 			return true;
4819 		}
4820 	} while (--depth > COLOR_DEPTH_666);
4821 	return false;
4822 }
4823 
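/*
 * Fill DC stream timing from a DRM display mode: select the pixel
 * encoding (YCbCr 4:2:0 where the mode requires or the connector forces
 * it, 4:4:4 where an HDMI sink supports it, RGB otherwise), the color
 * depth, the CEA VIC and sync polarities (reused from old_stream when
 * only scaling changed), then the porches, sync widths and pixel clock.
 * For HDMI the depth may be lowered afterwards to respect the sink's
 * TMDS clock limit.
 */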
4824 static void fill_stream_properties_from_drm_display_mode(
4825 	struct dc_stream_state *stream,
4826 	const struct drm_display_mode *mode_in,
4827 	const struct drm_connector *connector,
4828 	const struct drm_connector_state *connector_state,
4829 	const struct dc_stream_state *old_stream,
4830 	int requested_bpc)
4831 {
4832 	struct dc_crtc_timing *timing_out = &stream->timing;
4833 	const struct drm_display_info *info = &connector->display_info;
4834 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4835 	struct hdmi_vendor_infoframe hv_frame;
4836 	struct hdmi_avi_infoframe avi_frame;
4837 
4838 	memset(&hv_frame, 0, sizeof(hv_frame));
4839 	memset(&avi_frame, 0, sizeof(avi_frame));
4840 
4841 	timing_out->h_border_left = 0;
4842 	timing_out->h_border_right = 0;
4843 	timing_out->v_border_top = 0;
4844 	timing_out->v_border_bottom = 0;
4845 	/* TODO: un-hardcode */
4846 	if (drm_mode_is_420_only(info, mode_in)
4847 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4848 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4849 	else if (drm_mode_is_420_also(info, mode_in)
4850 			&& aconnector->force_yuv420_output)
4851 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4852 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4853 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4854 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4855 	else
4856 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4857 
4858 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4859 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4860 		connector,
4861 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4862 		requested_bpc);
4863 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4864 	timing_out->hdmi_vic = 0;
4865 
4866 	if (old_stream) {
4867 		timing_out->vic = old_stream->timing.vic;
4868 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4869 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4870 	} else {
4871 		timing_out->vic = drm_match_cea_mode(mode_in);
4872 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4873 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4874 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4875 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4876 	}
4877 
4878 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4879 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4880 		timing_out->vic = avi_frame.video_code;
4881 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4882 		timing_out->hdmi_vic = hv_frame.vic;
4883 	}
4884 
4885 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4886 	timing_out->h_total = mode_in->crtc_htotal;
4887 	timing_out->h_sync_width =
4888 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4889 	timing_out->h_front_porch =
4890 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4891 	timing_out->v_total = mode_in->crtc_vtotal;
4892 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4893 	timing_out->v_front_porch =
4894 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4895 	timing_out->v_sync_width =
4896 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4897 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4898 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4899 
4900 	stream->output_color_space = get_output_color_space(timing_out);
4901 
4902 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4903 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4904 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4905 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4906 		    drm_mode_is_420_also(info, mode_in) &&
4907 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4908 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4909 			adjust_colour_depth_from_display_info(timing_out, info);
4910 		}
4911 	}
4912 }
4913 
4914 static void fill_audio_info(struct audio_info *audio_info,
4915 			    const struct drm_connector *drm_connector,
4916 			    const struct dc_sink *dc_sink)
4917 {
4918 	int i = 0;
4919 	int cea_revision = 0;
4920 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4921 
4922 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4923 	audio_info->product_id = edid_caps->product_id;
4924 
4925 	cea_revision = drm_connector->display_info.cea_rev;
4926 
4927 	strscpy(audio_info->display_name,
4928 		edid_caps->display_name,
4929 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4930 
4931 	if (cea_revision >= 3) {
4932 		audio_info->mode_count = edid_caps->audio_mode_count;
4933 
4934 		for (i = 0; i < audio_info->mode_count; ++i) {
4935 			audio_info->modes[i].format_code =
4936 					(enum audio_format_code)
4937 					(edid_caps->audio_modes[i].format_code);
4938 			audio_info->modes[i].channel_count =
4939 					edid_caps->audio_modes[i].channel_count;
4940 			audio_info->modes[i].sample_rates.all =
4941 					edid_caps->audio_modes[i].sample_rate;
4942 			audio_info->modes[i].sample_size =
4943 					edid_caps->audio_modes[i].sample_size;
4944 		}
4945 	}
4946 
4947 	audio_info->flags.all = edid_caps->speaker_flags;
4948 
4949 	/* TODO: We only check progressive mode; check interlaced mode too. */
4950 	if (drm_connector->latency_present[0]) {
4951 		audio_info->video_latency = drm_connector->video_latency[0];
4952 		audio_info->audio_latency = drm_connector->audio_latency[0];
4953 	}
4954 
4955 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4957 }
4958 
4959 static void
4960 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4961 				      struct drm_display_mode *dst_mode)
4962 {
4963 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4964 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4965 	dst_mode->crtc_clock = src_mode->crtc_clock;
4966 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4967 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4968 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4969 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4970 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4971 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4972 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4973 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4974 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4975 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4976 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4977 }
4978 
4979 static void
4980 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4981 					const struct drm_display_mode *native_mode,
4982 					bool scale_enabled)
4983 {
4984 	if (scale_enabled) {
4985 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4986 	} else if (native_mode->clock == drm_mode->clock &&
4987 			native_mode->htotal == drm_mode->htotal &&
4988 			native_mode->vtotal == drm_mode->vtotal) {
4989 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4990 	} else {
4991 		/* no scaling and no amdgpu-inserted mode: nothing to patch */
4992 	}
4993 }
4994 
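/*
 * Create a virtual sink on the connector's link so that a stream can
 * still be constructed when no physical sink is attached (e.g. a forced
 * connector). The signal type is inherited from the link and the sink is
 * marked SIGNAL_TYPE_VIRTUAL.
 */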
4995 static struct dc_sink *
4996 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4997 {
4998 	struct dc_sink_init_data sink_init_data = { 0 };
4999 	struct dc_sink *sink = NULL;

5000 	sink_init_data.link = aconnector->dc_link;
5001 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5002 
5003 	sink = dc_sink_create(&sink_init_data);
5004 	if (!sink) {
5005 		DRM_ERROR("Failed to create sink!\n");
5006 		return NULL;
5007 	}
5008 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5009 
5010 	return sink;
5011 }
5012 
5013 static void set_multisync_trigger_params(
5014 		struct dc_stream_state *stream)
5015 {
5016 	if (stream->triggered_crtc_reset.enabled) {
5017 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5018 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5019 	}
5020 }
5021 
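/*
 * Of the streams that have a triggered CRTC reset enabled, elect the one
 * with the highest refresh rate (pix_clk * 100 / (h_total * v_total)) as
 * the master; every stream in the set then uses the master as its reset
 * event source.
 */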
5022 static void set_master_stream(struct dc_stream_state *stream_set[],
5023 			      int stream_count)
5024 {
5025 	int j, highest_rfr = 0, master_stream = 0;
5026 
5027 	for (j = 0; j < stream_count; j++) {
5028 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5029 			int refresh_rate = 0;
5030 
5031 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5032 				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5033 			if (refresh_rate > highest_rfr) {
5034 				highest_rfr = refresh_rate;
5035 				master_stream = j;
5036 			}
5037 		}
5038 	}
5039 	for (j = 0; j < stream_count; j++) {
5040 		if (stream_set[j])
5041 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5042 	}
5043 }
5044 
5045 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5046 {
5047 	int i = 0;
5048 
5049 	if (context->stream_count < 2)
5050 		return;
5051 	for (i = 0; i < context->stream_count; i++) {
5052 		if (!context->streams[i])
5053 			continue;
5054 		/*
5055 		 * TODO: add a function to read AMD VSDB bits and set
5056 		 * crtc_sync_master.multi_sync_enabled flag
5057 		 * For now it's set to false
5058 		 */
5059 		set_multisync_trigger_params(context->streams[i]);
5060 	}
5061 	set_master_stream(context->streams, context->stream_count);
5062 }
5063 
5064 static struct dc_stream_state *
5065 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5066 		       const struct drm_display_mode *drm_mode,
5067 		       const struct dm_connector_state *dm_state,
5068 		       const struct dc_stream_state *old_stream,
5069 		       int requested_bpc)
5070 {
5071 	struct drm_display_mode *preferred_mode = NULL;
5072 	struct drm_connector *drm_connector;
5073 	const struct drm_connector_state *con_state =
5074 		dm_state ? &dm_state->base : NULL;
5075 	struct dc_stream_state *stream = NULL;
5076 	struct drm_display_mode mode = *drm_mode;
5077 	bool native_mode_found = false;
5078 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5079 	int mode_refresh;
5080 	int preferred_refresh = 0;
5081 #if defined(CONFIG_DRM_AMD_DC_DCN)
5082 	struct dsc_dec_dpcd_caps dsc_caps;
5083 	uint32_t link_bandwidth_kbps;
5084 #endif
5085 	struct dc_sink *sink = NULL;
5086 	if (aconnector == NULL) {
5087 		DRM_ERROR("aconnector is NULL!\n");
5088 		return stream;
5089 	}
5090 
5091 	drm_connector = &aconnector->base;
5092 
5093 	if (!aconnector->dc_sink) {
5094 		sink = create_fake_sink(aconnector);
5095 		if (!sink)
5096 			return stream;
5097 	} else {
5098 		sink = aconnector->dc_sink;
5099 		dc_sink_retain(sink);
5100 	}
5101 
5102 	stream = dc_create_stream_for_sink(sink);
5103 
5104 	if (stream == NULL) {
5105 		DRM_ERROR("Failed to create stream for sink!\n");
5106 		goto finish;
5107 	}
5108 
5109 	stream->dm_stream_context = aconnector;
5110 
5111 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5112 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5113 
5114 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5115 		/* Search for preferred mode */
5116 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5117 			native_mode_found = true;
5118 			break;
5119 		}
5120 	}
5121 	if (!native_mode_found)
5122 		preferred_mode = list_first_entry_or_null(
5123 				&aconnector->base.modes,
5124 				struct drm_display_mode,
5125 				head);
5126 
5127 	mode_refresh = drm_mode_vrefresh(&mode);
5128 
5129 	if (preferred_mode == NULL) {
5130 		/*
5131 		 * This may not be an error: the use case is when we have no
5132 		 * usermode calls to reset and set mode upon hotplug. In this
5133 		 * case, we call set mode ourselves to restore the previous mode
5134 		 * and the mode list may not yet be filled in.
5135 		 */
5136 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5137 	} else {
5138 		decide_crtc_timing_for_drm_display_mode(
5139 				&mode, preferred_mode,
5140 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5141 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5142 	}
5143 
5144 	if (!dm_state)
5145 		drm_mode_set_crtcinfo(&mode, 0);
5146 
5147 	/*
5148 	 * If scaling is enabled and the refresh rate didn't change,
5149 	 * we copy the VIC and polarities from the old timing.
5150 	 */
5151 	if (!scale || mode_refresh != preferred_refresh)
5152 		fill_stream_properties_from_drm_display_mode(stream,
5153 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
5154 	else
5155 		fill_stream_properties_from_drm_display_mode(stream,
5156 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
5157 
5158 	stream->timing.flags.DSC = 0;
5159 
5160 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5161 #if defined(CONFIG_DRM_AMD_DC_DCN)
5162 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5163 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5164 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5165 				      &dsc_caps);
5166 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5167 							     dc_link_get_link_cap(aconnector->dc_link));
5168 
5169 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5170 			/* Set DSC policy according to dsc_clock_en */
5171 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5172 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5173 
5174 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5175 						  &dsc_caps,
5176 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5177 						  0,
5178 						  link_bandwidth_kbps,
5179 						  &stream->timing,
5180 						  &stream->timing.dsc_cfg))
5181 				stream->timing.flags.DSC = 1;
5182 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5183 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5184 				stream->timing.flags.DSC = 1;
5185 
5186 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5187 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5188 
5189 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5190 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5191 
5192 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5193 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5194 		}
5195 #endif
5196 	}
5197 
5198 	update_stream_scaling_settings(&mode, dm_state, stream);
5199 
5200 	fill_audio_info(
5201 		&stream->audio_info,
5202 		drm_connector,
5203 		sink);
5204 
5205 	update_stream_signal(stream, sink);
5206 
5207 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5208 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5209 
5210 	if (stream->link->psr_settings.psr_feature_enabled) {
5211 		/*
5212 		 * Decide whether the stream supports VSC SDP colorimetry
5213 		 * before building the VSC info packet.
5214 		 */
5215 		stream->use_vsc_sdp_for_colorimetry = false;
5216 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5217 			stream->use_vsc_sdp_for_colorimetry =
5218 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5219 		} else {
5220 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5221 				stream->use_vsc_sdp_for_colorimetry = true;
5222 		}
5223 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5224 	}
5225 finish:
5226 	dc_sink_release(sink);
5227 
5228 	return stream;
5229 }
5230 
5231 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5232 {
5233 	drm_crtc_cleanup(crtc);
5234 	kfree(crtc);
5235 }
5236 
5237 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5238 				  struct drm_crtc_state *state)
5239 {
5240 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5241 
5242 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
5243 	if (cur->stream)
5244 		dc_stream_release(cur->stream);
5245 
5246 
5247 	__drm_atomic_helper_crtc_destroy_state(state);
5248 
5249 
5250 	kfree(state);
5251 }
5252 
5253 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5254 {
5255 	struct dm_crtc_state *state;
5256 
5257 	if (crtc->state)
5258 		dm_crtc_destroy_state(crtc, crtc->state);
5259 
5260 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5261 	if (WARN_ON(!state))
5262 		return;
5263 
5264 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5265 }
5266 
5267 static struct drm_crtc_state *
5268 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5269 {
5270 	struct dm_crtc_state *state, *cur;
5271 
5272 	if (WARN_ON(!crtc->state))
5273 		return NULL;
5274 
5275 	cur = to_dm_crtc_state(crtc->state);
5276 
5277 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5278 	if (!state)
5279 		return NULL;
5280 
5281 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5282 
5283 	if (cur->stream) {
5284 		state->stream = cur->stream;
5285 		dc_stream_retain(state->stream);
5286 	}
5287 
5288 	state->active_planes = cur->active_planes;
5289 	state->vrr_infopacket = cur->vrr_infopacket;
5290 	state->abm_level = cur->abm_level;
5291 	state->vrr_supported = cur->vrr_supported;
5292 	state->freesync_config = cur->freesync_config;
5293 	state->crc_src = cur->crc_src;
5294 	state->cm_has_degamma = cur->cm_has_degamma;
5295 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5296 
5297 	/* TODO: Duplicate dc_stream once the stream object is flattened */
5298 
5299 	return &state->base;
5300 }
5301 
5302 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5303 {
5304 	enum dc_irq_source irq_source;
5305 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5306 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5307 	int rc;
5308 
5309 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5310 
5311 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5312 
5313 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5314 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
5315 	return rc;
5316 }
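
/*
 * Usage sketch for the helper above (hypothetical caller): the IRQ source
 * is simply indexed off the OTG instance, so with IRQ_TYPE_VUPDATE naming
 * the first VUPDATE source, otg_inst == 1 selects the second one.
 *
 *   if (dm_set_vupdate_irq(crtc, true))
 *           DRM_DEBUG_DRIVER("vupdate irq enable rejected by DC\n");
 */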
5317 
5318 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5319 {
5320 	enum dc_irq_source irq_source;
5321 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5322 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5323 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5324 	int rc = 0;
5325 
5326 	if (enable) {
5327 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5328 		if (amdgpu_dm_vrr_active(acrtc_state))
5329 			rc = dm_set_vupdate_irq(crtc, true);
5330 	} else {
5331 		/* vblank irq off -> vupdate irq off */
5332 		rc = dm_set_vupdate_irq(crtc, false);
5333 	}
5334 
5335 	if (rc)
5336 		return rc;
5337 
5338 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5339 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5340 }
5341 
5342 static int dm_enable_vblank(struct drm_crtc *crtc)
5343 {
5344 	return dm_set_vblank(crtc, true);
5345 }
5346 
5347 static void dm_disable_vblank(struct drm_crtc *crtc)
5348 {
5349 	dm_set_vblank(crtc, false);
5350 }
5351 
5352 /* Implement only the options currently available for the driver */
5353 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5354 	.reset = dm_crtc_reset_state,
5355 	.destroy = amdgpu_dm_crtc_destroy,
5356 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
5357 	.set_config = drm_atomic_helper_set_config,
5358 	.page_flip = drm_atomic_helper_page_flip,
5359 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5360 	.atomic_destroy_state = dm_crtc_destroy_state,
5361 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5362 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5363 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5364 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5365 	.enable_vblank = dm_enable_vblank,
5366 	.disable_vblank = dm_disable_vblank,
5367 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5368 };
5369 
5370 static enum drm_connector_status
5371 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5372 {
5373 	bool connected;
5374 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5375 
5376 	/*
5377 	 * Notes:
5378 	 * 1. This interface is NOT called in context of HPD irq.
5379 	 * 2. This interface *is called* in the context of a user-mode ioctl,
5380 	 * which makes it a bad place for *any* MST-related activity.
5381 	 */
5382 
5383 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5384 	    !aconnector->fake_enable)
5385 		connected = (aconnector->dc_sink != NULL);
5386 	else
5387 		connected = (aconnector->base.force == DRM_FORCE_ON);
5388 
5389 	update_subconnector_property(aconnector);
5390 
5391 	return (connected ? connector_status_connected :
5392 			connector_status_disconnected);
5393 }
5394 
5395 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5396 					    struct drm_connector_state *connector_state,
5397 					    struct drm_property *property,
5398 					    uint64_t val)
5399 {
5400 	struct drm_device *dev = connector->dev;
5401 	struct amdgpu_device *adev = drm_to_adev(dev);
5402 	struct dm_connector_state *dm_old_state =
5403 		to_dm_connector_state(connector->state);
5404 	struct dm_connector_state *dm_new_state =
5405 		to_dm_connector_state(connector_state);
5406 
5407 	int ret = -EINVAL;
5408 
5409 	if (property == dev->mode_config.scaling_mode_property) {
5410 		enum amdgpu_rmx_type rmx_type;
5411 
5412 		switch (val) {
5413 		case DRM_MODE_SCALE_CENTER:
5414 			rmx_type = RMX_CENTER;
5415 			break;
5416 		case DRM_MODE_SCALE_ASPECT:
5417 			rmx_type = RMX_ASPECT;
5418 			break;
5419 		case DRM_MODE_SCALE_FULLSCREEN:
5420 			rmx_type = RMX_FULL;
5421 			break;
5422 		case DRM_MODE_SCALE_NONE:
5423 		default:
5424 			rmx_type = RMX_OFF;
5425 			break;
5426 		}
5427 
5428 		if (dm_old_state->scaling == rmx_type)
5429 			return 0;
5430 
5431 		dm_new_state->scaling = rmx_type;
5432 		ret = 0;
5433 	} else if (property == adev->mode_info.underscan_hborder_property) {
5434 		dm_new_state->underscan_hborder = val;
5435 		ret = 0;
5436 	} else if (property == adev->mode_info.underscan_vborder_property) {
5437 		dm_new_state->underscan_vborder = val;
5438 		ret = 0;
5439 	} else if (property == adev->mode_info.underscan_property) {
5440 		dm_new_state->underscan_enable = val;
5441 		ret = 0;
5442 	} else if (property == adev->mode_info.abm_level_property) {
5443 		dm_new_state->abm_level = val;
5444 		ret = 0;
5445 	}
5446 
5447 	return ret;
5448 }
5449 
5450 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5451 					    const struct drm_connector_state *state,
5452 					    struct drm_property *property,
5453 					    uint64_t *val)
5454 {
5455 	struct drm_device *dev = connector->dev;
5456 	struct amdgpu_device *adev = drm_to_adev(dev);
5457 	struct dm_connector_state *dm_state =
5458 		to_dm_connector_state(state);
5459 	int ret = -EINVAL;
5460 
5461 	if (property == dev->mode_config.scaling_mode_property) {
5462 		switch (dm_state->scaling) {
5463 		case RMX_CENTER:
5464 			*val = DRM_MODE_SCALE_CENTER;
5465 			break;
5466 		case RMX_ASPECT:
5467 			*val = DRM_MODE_SCALE_ASPECT;
5468 			break;
5469 		case RMX_FULL:
5470 			*val = DRM_MODE_SCALE_FULLSCREEN;
5471 			break;
5472 		case RMX_OFF:
5473 		default:
5474 			*val = DRM_MODE_SCALE_NONE;
5475 			break;
5476 		}
5477 		ret = 0;
5478 	} else if (property == adev->mode_info.underscan_hborder_property) {
5479 		*val = dm_state->underscan_hborder;
5480 		ret = 0;
5481 	} else if (property == adev->mode_info.underscan_vborder_property) {
5482 		*val = dm_state->underscan_vborder;
5483 		ret = 0;
5484 	} else if (property == adev->mode_info.underscan_property) {
5485 		*val = dm_state->underscan_enable;
5486 		ret = 0;
5487 	} else if (property == adev->mode_info.abm_level_property) {
5488 		*val = dm_state->abm_level;
5489 		ret = 0;
5490 	}
5491 
5492 	return ret;
5493 }
5494 
5495 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5496 {
5497 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5498 
5499 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5500 }
5501 
5502 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5503 {
5504 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5505 	const struct dc_link *link = aconnector->dc_link;
5506 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5507 	struct amdgpu_display_manager *dm = &adev->dm;
5508 
5509 	/*
5510 	 * Call only if mst_mgr was initialized before, since it's not done
5511 	 * for all connector types.
5512 	 */
5513 	if (aconnector->mst_mgr.dev)
5514 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5515 
5516 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5517 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5518 
5519 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5520 	    link->type != dc_connection_none &&
5521 	    dm->backlight_dev) {
5522 		backlight_device_unregister(dm->backlight_dev);
5523 		dm->backlight_dev = NULL;
5524 	}
5525 #endif
5526 
5527 	if (aconnector->dc_em_sink)
5528 		dc_sink_release(aconnector->dc_em_sink);
5529 	aconnector->dc_em_sink = NULL;
5530 	if (aconnector->dc_sink)
5531 		dc_sink_release(aconnector->dc_sink);
5532 	aconnector->dc_sink = NULL;
5533 
5534 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5535 	drm_connector_unregister(connector);
5536 	drm_connector_cleanup(connector);
5537 	if (aconnector->i2c) {
5538 		i2c_del_adapter(&aconnector->i2c->base);
5539 		kfree(aconnector->i2c);
5540 	}
5541 	kfree(aconnector->dm_dp_aux.aux.name);
5542 
5543 	kfree(connector);
5544 }
5545 
5546 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5547 {
5548 	struct dm_connector_state *state =
5549 		to_dm_connector_state(connector->state);
5550 
5551 	if (connector->state)
5552 		__drm_atomic_helper_connector_destroy_state(connector->state);
5553 
5554 	kfree(state);
5555 
5556 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5557 
5558 	if (state) {
5559 		state->scaling = RMX_OFF;
5560 		state->underscan_enable = false;
5561 		state->underscan_hborder = 0;
5562 		state->underscan_vborder = 0;
5563 		state->base.max_requested_bpc = 8;
5564 		state->vcpi_slots = 0;
5565 		state->pbn = 0;
5566 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5567 			state->abm_level = amdgpu_dm_abm_level;
5568 
5569 		__drm_atomic_helper_connector_reset(connector, &state->base);
5570 	}
5571 }
5572 
5573 struct drm_connector_state *
5574 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5575 {
5576 	struct dm_connector_state *state =
5577 		to_dm_connector_state(connector->state);
5578 
5579 	struct dm_connector_state *new_state =
5580 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5581 
5582 	if (!new_state)
5583 		return NULL;
5584 
5585 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5586 
5587 	new_state->freesync_capable = state->freesync_capable;
5588 	new_state->abm_level = state->abm_level;
5589 	new_state->scaling = state->scaling;
5590 	new_state->underscan_enable = state->underscan_enable;
5591 	new_state->underscan_hborder = state->underscan_hborder;
5592 	new_state->underscan_vborder = state->underscan_vborder;
5593 	new_state->vcpi_slots = state->vcpi_slots;
5594 	new_state->pbn = state->pbn;
5595 	return &new_state->base;
5596 }
5597 
5598 static int
5599 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5600 {
5601 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5602 		to_amdgpu_dm_connector(connector);
5603 	int r;
5604 
5605 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5606 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5607 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5608 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5609 		if (r)
5610 			return r;
5611 	}
5612 
5613 #if defined(CONFIG_DEBUG_FS)
5614 	connector_debugfs_init(amdgpu_dm_connector);
5615 #endif
5616 
5617 	return 0;
5618 }
5619 
5620 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5621 	.reset = amdgpu_dm_connector_funcs_reset,
5622 	.detect = amdgpu_dm_connector_detect,
5623 	.fill_modes = drm_helper_probe_single_connector_modes,
5624 	.destroy = amdgpu_dm_connector_destroy,
5625 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5626 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5627 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5628 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5629 	.late_register = amdgpu_dm_connector_late_register,
5630 	.early_unregister = amdgpu_dm_connector_unregister
5631 };
5632 
5633 static int get_modes(struct drm_connector *connector)
5634 {
5635 	return amdgpu_dm_connector_get_modes(connector);
5636 }
5637 
5638 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5639 {
5640 	struct dc_sink_init_data init_params = {
5641 			.link = aconnector->dc_link,
5642 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5643 	};
5644 	struct edid *edid;
5645 
5646 	if (!aconnector->base.edid_blob_ptr) {
5647 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5648 				aconnector->base.name);
5649 
5650 		aconnector->base.force = DRM_FORCE_OFF;
5651 		aconnector->base.override_edid = false;
5652 		return;
5653 	}
5654 
5655 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5656 
5657 	aconnector->edid = edid;
5658 
5659 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5660 		aconnector->dc_link,
5661 		(uint8_t *)edid,
5662 		(edid->extensions + 1) * EDID_LENGTH,
5663 		&init_params);
5664 
5665 	if (aconnector->base.force == DRM_FORCE_ON) {
5666 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5667 		aconnector->dc_link->local_sink :
5668 		aconnector->dc_em_sink;
5669 		dc_sink_retain(aconnector->dc_sink);
5670 	}
5671 }
5672 
5673 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5674 {
5675 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5676 
5677 	/*
5678 	 * In case of a headless boot with force on for a DP managed connector,
5679 	 * these settings have to be != 0 to get an initial modeset.
5680 	 */
5681 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5682 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5683 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5684 	}
5685 
5686 
5687 	aconnector->base.override_edid = true;
5688 	create_eml_sink(aconnector);
5689 }
5690 
5691 static struct dc_stream_state *
5692 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5693 				const struct drm_display_mode *drm_mode,
5694 				const struct dm_connector_state *dm_state,
5695 				const struct dc_stream_state *old_stream)
5696 {
5697 	struct drm_connector *connector = &aconnector->base;
5698 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5699 	struct dc_stream_state *stream;
5700 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5701 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5702 	enum dc_status dc_result = DC_OK;
5703 
5704 	do {
5705 		stream = create_stream_for_sink(aconnector, drm_mode,
5706 						dm_state, old_stream,
5707 						requested_bpc);
5708 		if (stream == NULL) {
5709 			DRM_ERROR("Failed to create stream for sink!\n");
5710 			break;
5711 		}
5712 
5713 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5714 
5715 		if (dc_result != DC_OK) {
5716 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5717 				      drm_mode->hdisplay,
5718 				      drm_mode->vdisplay,
5719 				      drm_mode->clock,
5720 				      dc_result,
5721 				      dc_status_to_str(dc_result));
5722 
5723 			dc_stream_release(stream);
5724 			stream = NULL;
5725 			requested_bpc -= 2; /* lower bpc to retry validation */
5726 		}
5727 
5728 	} while (stream == NULL && requested_bpc >= 6);
5729 
5730 	return stream;
5731 }
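
/*
 * Example of the fallback above: with max_requested_bpc == 10 the loop
 * tries requested_bpc = 10, then 8, then 6, stopping at the first bpc for
 * which dc_validate_stream() returns DC_OK (e.g. when 10 bpc exceeds the
 * link bandwidth but 8 bpc fits). If even 6 bpc fails validation, NULL is
 * returned and the mode is rejected.
 */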
5732 
5733 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5734 				   struct drm_display_mode *mode)
5735 {
5736 	int result = MODE_ERROR;
5737 	struct dc_sink *dc_sink;
5738 	/* TODO: Unhardcode stream count */
5739 	struct dc_stream_state *stream;
5740 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5741 
5742 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5743 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5744 		return result;
5745 
5746 	/*
5747 	 * Only run this the first time mode_valid is called, to initialize
5748 	 * EDID mgmt.
5749 	 */
5750 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5751 		!aconnector->dc_em_sink)
5752 		handle_edid_mgmt(aconnector);
5753 
5754 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5755 
5756 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5757 				aconnector->base.force != DRM_FORCE_ON) {
5758 		DRM_ERROR("dc_sink is NULL!\n");
5759 		goto fail;
5760 	}
5761 
5762 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5763 	if (stream) {
5764 		dc_stream_release(stream);
5765 		result = MODE_OK;
5766 	}
5767 
5768 fail:
5769 	/* TODO: error handling */
5770 	return result;
5771 }
5772 
5773 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5774 				struct dc_info_packet *out)
5775 {
5776 	struct hdmi_drm_infoframe frame;
5777 	unsigned char buf[30]; /* 26 + 4 */
5778 	ssize_t len;
5779 	int ret, i;
5780 
5781 	memset(out, 0, sizeof(*out));
5782 
5783 	if (!state->hdr_output_metadata)
5784 		return 0;
5785 
5786 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5787 	if (ret)
5788 		return ret;
5789 
5790 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5791 	if (len < 0)
5792 		return (int)len;
5793 
5794 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5795 	if (len != 30)
5796 		return -EINVAL;
5797 
5798 	/* Prepare the infopacket for DC. */
5799 	switch (state->connector->connector_type) {
5800 	case DRM_MODE_CONNECTOR_HDMIA:
5801 		out->hb0 = 0x87; /* type */
5802 		out->hb1 = 0x01; /* version */
5803 		out->hb2 = 0x1A; /* length */
5804 		out->sb[0] = buf[3]; /* checksum */
5805 		i = 1;
5806 		break;
5807 
5808 	case DRM_MODE_CONNECTOR_DisplayPort:
5809 	case DRM_MODE_CONNECTOR_eDP:
5810 		out->hb0 = 0x00; /* sdp id, zero */
5811 		out->hb1 = 0x87; /* type */
5812 		out->hb2 = 0x1D; /* payload len - 1 */
5813 		out->hb3 = (0x13 << 2); /* sdp version */
5814 		out->sb[0] = 0x01; /* version */
5815 		out->sb[1] = 0x1A; /* length */
5816 		i = 2;
5817 		break;
5818 
5819 	default:
5820 		return -EINVAL;
5821 	}
5822 
5823 	memcpy(&out->sb[i], &buf[4], 26);
5824 	out->valid = true;
5825 
5826 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5827 		       sizeof(out->sb), false);
5828 
5829 	return 0;
5830 }
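
/*
 * Sketch of the packing above: hdmi_drm_infoframe_pack_only() emits a
 * 4 byte header followed by the 26 byte static metadata payload, and the
 * switch rewrites the header for the transport:
 *
 *   buf[0..2]  infoframe header -> hb0..hb2 (HDMI) or hb1/hb2/hb3 (DP SDP)
 *   buf[3]     checksum         -> sb[0] on HDMI (hence i == 1)
 *   buf[4..29] HDR metadata     -> sb[i] onward (26 bytes)
 *
 * On DP the checksum byte is dropped and sb[0]/sb[1] carry the packet
 * version and length instead (hence i == 2).
 */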
5831 
5832 static bool
5833 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5834 			  const struct drm_connector_state *new_state)
5835 {
5836 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5837 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5838 
5839 	if (old_blob != new_blob) {
5840 		if (old_blob && new_blob &&
5841 		    old_blob->length == new_blob->length)
5842 			return memcmp(old_blob->data, new_blob->data,
5843 				      old_blob->length);
5844 
5845 		return true;
5846 	}
5847 
5848 	return false;
5849 }
5850 
5851 static int
5852 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5853 				 struct drm_atomic_state *state)
5854 {
5855 	struct drm_connector_state *new_con_state =
5856 		drm_atomic_get_new_connector_state(state, conn);
5857 	struct drm_connector_state *old_con_state =
5858 		drm_atomic_get_old_connector_state(state, conn);
5859 	struct drm_crtc *crtc = new_con_state->crtc;
5860 	struct drm_crtc_state *new_crtc_state;
5861 	int ret;
5862 
5863 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
5864 
5865 	if (!crtc)
5866 		return 0;
5867 
5868 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5869 		struct dc_info_packet hdr_infopacket;
5870 
5871 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5872 		if (ret)
5873 			return ret;
5874 
5875 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5876 		if (IS_ERR(new_crtc_state))
5877 			return PTR_ERR(new_crtc_state);
5878 
5879 		/*
5880 		 * DC considers the stream backends changed if the
5881 		 * static metadata changes. Forcing the modeset also
5882 		 * gives a simple way for userspace to switch from
5883 		 * 8bpc to 10bpc when setting the metadata to enter
5884 		 * or exit HDR.
5885 		 *
5886 		 * Changing the static metadata after it's been
5887 		 * set is permissible, however. So only force a
5888 		 * modeset if we're entering or exiting HDR.
5889 		 */
5890 		new_crtc_state->mode_changed =
5891 			!old_con_state->hdr_output_metadata ||
5892 			!new_con_state->hdr_output_metadata;
5893 	}
5894 
5895 	return 0;
5896 }
5897 
5898 static const struct drm_connector_helper_funcs
5899 amdgpu_dm_connector_helper_funcs = {
5900 	/*
5901 	 * If hotplugging a second, bigger display in FB console mode, the bigger
5902 	 * resolution modes will be filtered out by drm_mode_validate_size(), and
5903 	 * those modes are missing after the user starts lightdm. So we need to
5904 	 * renew the mode list in the get_modes callback, not just return the mode count.
5905 	 */
5906 	.get_modes = get_modes,
5907 	.mode_valid = amdgpu_dm_connector_mode_valid,
5908 	.atomic_check = amdgpu_dm_connector_atomic_check,
5909 };
5910 
5911 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5912 {
5913 }
5914 
5915 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5916 {
5917 	struct drm_atomic_state *state = new_crtc_state->state;
5918 	struct drm_plane *plane;
5919 	int num_active = 0;
5920 
5921 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5922 		struct drm_plane_state *new_plane_state;
5923 
5924 		/* Cursor planes are "fake". */
5925 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5926 			continue;
5927 
5928 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5929 
5930 		if (!new_plane_state) {
5931 			/*
5932 			 * The plane is enabled on the CRTC and hasn't changed
5933 			 * state. This means that it previously passed
5934 			 * validation and is therefore enabled.
5935 			 */
5936 			num_active += 1;
5937 			continue;
5938 		}
5939 
5940 		/* We need a framebuffer to be considered enabled. */
5941 		num_active += (new_plane_state->fb != NULL);
5942 	}
5943 
5944 	return num_active;
5945 }
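
/*
 * Example: a CRTC with primary, overlay and cursor planes in plane_mask
 * counts at most 2 here, since the cursor plane is skipped as "fake"
 * (handled by DC outside the plane pipes); each remaining plane counts
 * only if its new state has a framebuffer, or if it carries no new state
 * and was therefore already validated as enabled.
 */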
5946 
5947 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5948 					 struct drm_crtc_state *new_crtc_state)
5949 {
5950 	struct dm_crtc_state *dm_new_crtc_state =
5951 		to_dm_crtc_state(new_crtc_state);
5952 
5953 	dm_new_crtc_state->active_planes = 0;
5954 
5955 	if (!dm_new_crtc_state->stream)
5956 		return;
5957 
5958 	dm_new_crtc_state->active_planes =
5959 		count_crtc_active_planes(new_crtc_state);
5960 }
5961 
5962 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5963 				       struct drm_atomic_state *state)
5964 {
5965 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
5966 									  crtc);
5967 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5968 	struct dc *dc = adev->dm.dc;
5969 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5970 	int ret = -EINVAL;
5971 
5972 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
5973 
5974 	dm_update_crtc_active_planes(crtc, crtc_state);
5975 
5976 	if (unlikely(!dm_crtc_state->stream &&
5977 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
5978 		WARN_ON(1);
5979 		return ret;
5980 	}
5981 
5982 	/*
5983 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5984 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5985 	 * planes are disabled, which is not supported by the hardware. And there is legacy
5986 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5987 	 */
5988 	if (crtc_state->enable &&
5989 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
5990 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
5991 		return -EINVAL;
5992 	}
5993 
5994 	/* In some use cases, like reset, no stream is attached */
5995 	if (!dm_crtc_state->stream)
5996 		return 0;
5997 
5998 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5999 		return 0;
6000 
6001 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6002 	return ret;
6003 }
6004 
6005 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6006 				      const struct drm_display_mode *mode,
6007 				      struct drm_display_mode *adjusted_mode)
6008 {
6009 	return true;
6010 }
6011 
6012 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6013 	.disable = dm_crtc_helper_disable,
6014 	.atomic_check = dm_crtc_helper_atomic_check,
6015 	.mode_fixup = dm_crtc_helper_mode_fixup,
6016 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6017 };
6018 
6019 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6020 {
6021 
6022 }
6023 
6024 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6025 {
6026 	switch (display_color_depth) {
6027 	case COLOR_DEPTH_666:
6028 		return 6;
6029 	case COLOR_DEPTH_888:
6030 		return 8;
6031 	case COLOR_DEPTH_101010:
6032 		return 10;
6033 	case COLOR_DEPTH_121212:
6034 		return 12;
6035 	case COLOR_DEPTH_141414:
6036 		return 14;
6037 	case COLOR_DEPTH_161616:
6038 		return 16;
6039 	default:
6040 		break;
6041 	}
6042 	return 0;
6043 }
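
/*
 * The MST bandwidth math below multiplies this by 3 for the three color
 * components, e.g. COLOR_DEPTH_101010 -> 10 bpc -> 30 bpp for an RGB
 * stream; depths DC never reports (the default case) fall through to 0.
 */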
6044 
6045 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6046 					  struct drm_crtc_state *crtc_state,
6047 					  struct drm_connector_state *conn_state)
6048 {
6049 	struct drm_atomic_state *state = crtc_state->state;
6050 	struct drm_connector *connector = conn_state->connector;
6051 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6052 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6053 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6054 	struct drm_dp_mst_topology_mgr *mst_mgr;
6055 	struct drm_dp_mst_port *mst_port;
6056 	enum dc_color_depth color_depth;
6057 	int clock, bpp = 0;
6058 	bool is_y420 = false;
6059 
6060 	if (!aconnector->port || !aconnector->dc_sink)
6061 		return 0;
6062 
6063 	mst_port = aconnector->port;
6064 	mst_mgr = &aconnector->mst_port->mst_mgr;
6065 
6066 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6067 		return 0;
6068 
6069 	if (!state->duplicated) {
6070 		int max_bpc = conn_state->max_requested_bpc;
6071 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6072 				aconnector->force_yuv420_output;
6073 		color_depth = convert_color_depth_from_display_info(connector,
6074 								    is_y420,
6075 								    max_bpc);
6076 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6077 		clock = adjusted_mode->clock;
6078 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6079 	}
6080 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6081 									   mst_mgr,
6082 									   mst_port,
6083 									   dm_new_connector_state->pbn,
6084 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6085 	if (dm_new_connector_state->vcpi_slots < 0) {
6086 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6087 		return dm_new_connector_state->vcpi_slots;
6088 	}
6089 	return 0;
6090 }
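
/*
 * PBN example for the computation above (illustrative mode, margins per
 * the DRM helper): a 1920x1080@60 stream with clock = 148500 kHz at
 * 24 bpp carries 148500 * 24 / 8 = 445500 kB/s. drm_dp_calc_pbn_mode()
 * converts that to PBN units of 54/64 MB/s including a small overhead
 * margin, and drm_dp_atomic_find_vcpi_slots() then maps the PBN value to
 * timeslots available on the MST link.
 */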
6091 
6092 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6093 	.disable = dm_encoder_helper_disable,
6094 	.atomic_check = dm_encoder_helper_atomic_check
6095 };
6096 
6097 #if defined(CONFIG_DRM_AMD_DC_DCN)
6098 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6099 					    struct dc_state *dc_state)
6100 {
6101 	struct dc_stream_state *stream = NULL;
6102 	struct drm_connector *connector;
6103 	struct drm_connector_state *new_con_state, *old_con_state;
6104 	struct amdgpu_dm_connector *aconnector;
6105 	struct dm_connector_state *dm_conn_state;
6106 	int i, j, clock, bpp;
6107 	int vcpi, pbn_div, pbn = 0;
6108 
6109 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6110 
6111 		aconnector = to_amdgpu_dm_connector(connector);
6112 
6113 		if (!aconnector->port)
6114 			continue;
6115 
6116 		if (!new_con_state || !new_con_state->crtc)
6117 			continue;
6118 
6119 		dm_conn_state = to_dm_connector_state(new_con_state);
6120 
6121 		for (j = 0; j < dc_state->stream_count; j++) {
6122 			stream = dc_state->streams[j];
6123 			if (!stream)
6124 				continue;
6125 
6126 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6127 				break;
6128 
6129 			stream = NULL;
6130 		}
6131 
6132 		if (!stream)
6133 			continue;
6134 
6135 		if (stream->timing.flags.DSC != 1) {
6136 			drm_dp_mst_atomic_enable_dsc(state,
6137 						     aconnector->port,
6138 						     dm_conn_state->pbn,
6139 						     0,
6140 						     false);
6141 			continue;
6142 		}
6143 
6144 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6145 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6146 		clock = stream->timing.pix_clk_100hz / 10;
6147 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6148 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6149 						    aconnector->port,
6150 						    pbn, pbn_div,
6151 						    true);
6152 		if (vcpi < 0)
6153 			return vcpi;
6154 
6155 		dm_conn_state->pbn = pbn;
6156 		dm_conn_state->vcpi_slots = vcpi;
6157 	}
6158 	return 0;
6159 }
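
/*
 * Unit note for the DSC path above: dsc_cfg.bits_per_pixel is stored in
 * 1/16 bpp increments, which is why drm_dp_calc_pbn_mode() is called with
 * dsc == true so the helper divides the value back out; e.g. a target of
 * 8 bpp is passed in as 128.
 */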
6160 #endif
6161 
6162 static void dm_drm_plane_reset(struct drm_plane *plane)
6163 {
6164 	struct dm_plane_state *amdgpu_state = NULL;
6165 
6166 	if (plane->state)
6167 		plane->funcs->atomic_destroy_state(plane, plane->state);
6168 
6169 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6170 	WARN_ON(amdgpu_state == NULL);
6171 
6172 	if (amdgpu_state)
6173 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6174 }
6175 
6176 static struct drm_plane_state *
6177 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6178 {
6179 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6180 
6181 	old_dm_plane_state = to_dm_plane_state(plane->state);
6182 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6183 	if (!dm_plane_state)
6184 		return NULL;
6185 
6186 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6187 
6188 	if (old_dm_plane_state->dc_state) {
6189 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6190 		dc_plane_state_retain(dm_plane_state->dc_state);
6191 	}
6192 
6193 	return &dm_plane_state->base;
6194 }
6195 
6196 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6197 				struct drm_plane_state *state)
6198 {
6199 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6200 
6201 	if (dm_plane_state->dc_state)
6202 		dc_plane_state_release(dm_plane_state->dc_state);
6203 
6204 	drm_atomic_helper_plane_destroy_state(plane, state);
6205 }
6206 
6207 static const struct drm_plane_funcs dm_plane_funcs = {
6208 	.update_plane	= drm_atomic_helper_update_plane,
6209 	.disable_plane	= drm_atomic_helper_disable_plane,
6210 	.destroy	= drm_primary_helper_destroy,
6211 	.reset = dm_drm_plane_reset,
6212 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6213 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6214 	.format_mod_supported = dm_plane_format_mod_supported,
6215 };
6216 
6217 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6218 				      struct drm_plane_state *new_state)
6219 {
6220 	struct amdgpu_framebuffer *afb;
6221 	struct drm_gem_object *obj;
6222 	struct amdgpu_device *adev;
6223 	struct amdgpu_bo *rbo;
6224 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6225 	struct list_head list;
6226 	struct ttm_validate_buffer tv;
6227 	struct ww_acquire_ctx ticket;
6228 	uint32_t domain;
6229 	int r;
6230 
6231 	if (!new_state->fb) {
6232 		DRM_DEBUG_DRIVER("No FB bound\n");
6233 		return 0;
6234 	}
6235 
6236 	afb = to_amdgpu_framebuffer(new_state->fb);
6237 	obj = new_state->fb->obj[0];
6238 	rbo = gem_to_amdgpu_bo(obj);
6239 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6240 	INIT_LIST_HEAD(&list);
6241 
6242 	tv.bo = &rbo->tbo;
6243 	tv.num_shared = 1;
6244 	list_add(&tv.head, &list);
6245 
6246 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6247 	if (r) {
6248 		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
6249 		return r;
6250 	}
6251 
6252 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6253 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6254 	else
6255 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6256 
6257 	r = amdgpu_bo_pin(rbo, domain);
6258 	if (unlikely(r != 0)) {
6259 		if (r != -ERESTARTSYS)
6260 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6261 		ttm_eu_backoff_reservation(&ticket, &list);
6262 		return r;
6263 	}
6264 
6265 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6266 	if (unlikely(r != 0)) {
6267 		amdgpu_bo_unpin(rbo);
6268 		ttm_eu_backoff_reservation(&ticket, &list);
6269 		DRM_ERROR("%p bind failed\n", rbo);
6270 		return r;
6271 	}
6272 
6273 	ttm_eu_backoff_reservation(&ticket, &list);
6274 
6275 	afb->address = amdgpu_bo_gpu_offset(rbo);
6276 
6277 	amdgpu_bo_ref(rbo);
6278 
6279 	/*
6280 	 * We don't do surface updates on planes that have been newly created,
6281 	 * but we also don't have the afb->address during atomic check.
6282 	 *
6283 	 * Fill in buffer attributes depending on the address here, but only on
6284 	 * newly created planes since they're not being used by DC yet and this
6285 	 * won't modify global state.
6286 	 */
6287 	dm_plane_state_old = to_dm_plane_state(plane->state);
6288 	dm_plane_state_new = to_dm_plane_state(new_state);
6289 
6290 	if (dm_plane_state_new->dc_state &&
6291 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6292 		struct dc_plane_state *plane_state =
6293 			dm_plane_state_new->dc_state;
6294 		bool force_disable_dcc = !plane_state->dcc.enable;
6295 
6296 		fill_plane_buffer_attributes(
6297 			adev, afb, plane_state->format, plane_state->rotation,
6298 			afb->tiling_flags,
6299 			&plane_state->tiling_info, &plane_state->plane_size,
6300 			&plane_state->dcc, &plane_state->address,
6301 			afb->tmz_surface, force_disable_dcc);
6302 	}
6303 
6304 	return 0;
6305 }
6306 
6307 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6308 				       struct drm_plane_state *old_state)
6309 {
6310 	struct amdgpu_bo *rbo;
6311 	int r;
6312 
6313 	if (!old_state->fb)
6314 		return;
6315 
6316 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6317 	r = amdgpu_bo_reserve(rbo, false);
6318 	if (unlikely(r)) {
6319 		DRM_ERROR("failed to reserve rbo before unpin\n");
6320 		return;
6321 	}
6322 
6323 	amdgpu_bo_unpin(rbo);
6324 	amdgpu_bo_unreserve(rbo);
6325 	amdgpu_bo_unref(&rbo);
6326 }
6327 
6328 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6329 				       struct drm_crtc_state *new_crtc_state)
6330 {
6331 	int max_downscale = 0;
6332 	int max_upscale = INT_MAX;
6333 
6334 	/* TODO: These should be checked against DC plane caps */
6335 	return drm_atomic_helper_check_plane_state(
6336 		state, new_crtc_state, max_downscale, max_upscale, true, true);
6337 }
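
/*
 * The bounds passed above are 16.16 fixed-point scale factors as consumed
 * by drm_atomic_helper_check_plane_state(): 1:1 scaling would be 1 << 16,
 * so 0 and INT_MAX effectively disable the scaling checks entirely, which
 * is what the TODO about real DC plane caps refers to.
 */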
6338 
6339 static int dm_plane_atomic_check(struct drm_plane *plane,
6340 				 struct drm_plane_state *state)
6341 {
6342 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6343 	struct dc *dc = adev->dm.dc;
6344 	struct dm_plane_state *dm_plane_state;
6345 	struct dc_scaling_info scaling_info;
6346 	struct drm_crtc_state *new_crtc_state;
6347 	int ret;
6348 
6349 	trace_amdgpu_dm_plane_atomic_check(state);
6350 
6351 	dm_plane_state = to_dm_plane_state(state);
6352 
6353 	if (!dm_plane_state->dc_state)
6354 		return 0;
6355 
6356 	new_crtc_state =
6357 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6358 	if (!new_crtc_state)
6359 		return -EINVAL;
6360 
6361 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6362 	if (ret)
6363 		return ret;
6364 
6365 	ret = fill_dc_scaling_info(state, &scaling_info);
6366 	if (ret)
6367 		return ret;
6368 
6369 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6370 		return 0;
6371 
6372 	return -EINVAL;
6373 }
6374 
6375 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6376 				       struct drm_plane_state *new_plane_state)
6377 {
6378 	/* Only support async updates on cursor planes. */
6379 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6380 		return -EINVAL;
6381 
6382 	return 0;
6383 }
6384 
6385 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6386 					 struct drm_plane_state *new_state)
6387 {
6388 	struct drm_plane_state *old_state =
6389 		drm_atomic_get_old_plane_state(new_state->state, plane);
6390 
6391 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6392 
6393 	swap(plane->state->fb, new_state->fb);
6394 
6395 	plane->state->src_x = new_state->src_x;
6396 	plane->state->src_y = new_state->src_y;
6397 	plane->state->src_w = new_state->src_w;
6398 	plane->state->src_h = new_state->src_h;
6399 	plane->state->crtc_x = new_state->crtc_x;
6400 	plane->state->crtc_y = new_state->crtc_y;
6401 	plane->state->crtc_w = new_state->crtc_w;
6402 	plane->state->crtc_h = new_state->crtc_h;
6403 
6404 	handle_cursor_update(plane, old_state);
6405 }
6406 
6407 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6408 	.prepare_fb = dm_plane_helper_prepare_fb,
6409 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6410 	.atomic_check = dm_plane_atomic_check,
6411 	.atomic_async_check = dm_plane_atomic_async_check,
6412 	.atomic_async_update = dm_plane_atomic_async_update
6413 };
6414 
6415 /*
6416  * TODO: these are currently initialized to RGB formats only.
6417  * For future use cases we should either initialize them dynamically based on
6418  * plane capabilities, or initialize this array to all formats, so the internal
6419  * drm check will succeed, and let DC implement the proper check.
6420  */
6421 static const uint32_t rgb_formats[] = {
6422 	DRM_FORMAT_XRGB8888,
6423 	DRM_FORMAT_ARGB8888,
6424 	DRM_FORMAT_RGBA8888,
6425 	DRM_FORMAT_XRGB2101010,
6426 	DRM_FORMAT_XBGR2101010,
6427 	DRM_FORMAT_ARGB2101010,
6428 	DRM_FORMAT_ABGR2101010,
6429 	DRM_FORMAT_XBGR8888,
6430 	DRM_FORMAT_ABGR8888,
6431 	DRM_FORMAT_RGB565,
6432 };
6433 
6434 static const uint32_t overlay_formats[] = {
6435 	DRM_FORMAT_XRGB8888,
6436 	DRM_FORMAT_ARGB8888,
6437 	DRM_FORMAT_RGBA8888,
6438 	DRM_FORMAT_XBGR8888,
6439 	DRM_FORMAT_ABGR8888,
6440 	DRM_FORMAT_RGB565
6441 };
6442 
6443 static const u32 cursor_formats[] = {
6444 	DRM_FORMAT_ARGB8888
6445 };
6446 
6447 static int get_plane_formats(const struct drm_plane *plane,
6448 			     const struct dc_plane_cap *plane_cap,
6449 			     uint32_t *formats, int max_formats)
6450 {
6451 	int i, num_formats = 0;
6452 
6453 	/*
6454 	 * TODO: Query support for each group of formats directly from
6455 	 * DC plane caps. This will require adding more formats to the
6456 	 * caps list.
6457 	 */
6458 
6459 	switch (plane->type) {
6460 	case DRM_PLANE_TYPE_PRIMARY:
6461 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6462 			if (num_formats >= max_formats)
6463 				break;
6464 
6465 			formats[num_formats++] = rgb_formats[i];
6466 		}
6467 
6468 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6469 			formats[num_formats++] = DRM_FORMAT_NV12;
6470 		if (plane_cap && plane_cap->pixel_format_support.p010)
6471 			formats[num_formats++] = DRM_FORMAT_P010;
6472 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6473 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6474 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6475 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6476 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6477 		}
6478 		break;
6479 
6480 	case DRM_PLANE_TYPE_OVERLAY:
6481 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6482 			if (num_formats >= max_formats)
6483 				break;
6484 
6485 			formats[num_formats++] = overlay_formats[i];
6486 		}
6487 		break;
6488 
6489 	case DRM_PLANE_TYPE_CURSOR:
6490 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6491 			if (num_formats >= max_formats)
6492 				break;
6493 
6494 			formats[num_formats++] = cursor_formats[i];
6495 		}
6496 		break;
6497 	}
6498 
6499 	return num_formats;
6500 }
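
/*
 * Hypothetical usage sketch (mirroring amdgpu_dm_plane_init() below):
 *
 *   uint32_t formats[32];
 *   int n = get_plane_formats(plane, plane_cap, formats,
 *                             ARRAY_SIZE(formats));
 *
 * The max_formats bound keeps the fill safe even if more cap-dependent
 * formats are added than the caller's array can hold.
 */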
6501 
6502 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6503 				struct drm_plane *plane,
6504 				unsigned long possible_crtcs,
6505 				const struct dc_plane_cap *plane_cap)
6506 {
6507 	uint32_t formats[32];
6508 	int num_formats;
6509 	int res = -EPERM;
6510 	unsigned int supported_rotations;
6511 	uint64_t *modifiers = NULL;
6512 
6513 	num_formats = get_plane_formats(plane, plane_cap, formats,
6514 					ARRAY_SIZE(formats));
6515 
6516 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6517 	if (res)
6518 		return res;
6519 
6520 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6521 				       &dm_plane_funcs, formats, num_formats,
6522 				       modifiers, plane->type, NULL);
6523 	kfree(modifiers);
6524 	if (res)
6525 		return res;
6526 
6527 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6528 	    plane_cap && plane_cap->per_pixel_alpha) {
6529 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6530 					  BIT(DRM_MODE_BLEND_PREMULTI);
6531 
6532 		drm_plane_create_alpha_property(plane);
6533 		drm_plane_create_blend_mode_property(plane, blend_caps);
6534 	}
6535 
6536 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6537 	    plane_cap &&
6538 	    (plane_cap->pixel_format_support.nv12 ||
6539 	     plane_cap->pixel_format_support.p010)) {
6540 		/* This only affects YUV formats. */
6541 		drm_plane_create_color_properties(
6542 			plane,
6543 			BIT(DRM_COLOR_YCBCR_BT601) |
6544 			BIT(DRM_COLOR_YCBCR_BT709) |
6545 			BIT(DRM_COLOR_YCBCR_BT2020),
6546 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6547 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6548 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6549 	}
6550 
6551 	supported_rotations =
6552 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6553 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6554 
6555 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
6556 	    plane->type != DRM_PLANE_TYPE_CURSOR)
6557 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6558 						   supported_rotations);
6559 
6560 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6561 
6562 	/* Create (reset) the plane state */
6563 	if (plane->funcs->reset)
6564 		plane->funcs->reset(plane);
6565 
6566 	return 0;
6567 }
6568 
6569 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6570 			       struct drm_plane *plane,
6571 			       uint32_t crtc_index)
6572 {
6573 	struct amdgpu_crtc *acrtc = NULL;
6574 	struct drm_plane *cursor_plane;
6575 
6576 	int res = -ENOMEM;
6577 
6578 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6579 	if (!cursor_plane)
6580 		goto fail;
6581 
6582 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6583 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6584 
6585 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6586 	if (!acrtc)
6587 		goto fail;
6588 
6589 	res = drm_crtc_init_with_planes(
6590 			dm->ddev,
6591 			&acrtc->base,
6592 			plane,
6593 			cursor_plane,
6594 			&amdgpu_dm_crtc_funcs, NULL);
6595 
6596 	if (res)
6597 		goto fail;
6598 
6599 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6600 
6601 	/* Create (reset) the crtc state */
6602 	if (acrtc->base.funcs->reset)
6603 		acrtc->base.funcs->reset(&acrtc->base);
6604 
6605 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6606 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6607 
6608 	acrtc->crtc_id = crtc_index;
6609 	acrtc->base.enabled = false;
6610 	acrtc->otg_inst = -1;
6611 
6612 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6613 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6614 				   true, MAX_COLOR_LUT_ENTRIES);
6615 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6616 
6617 	return 0;
6618 
6619 fail:
6620 	kfree(acrtc);
6621 	kfree(cursor_plane);
6622 	return res;
6623 }
6624 
6625 
6626 static int to_drm_connector_type(enum signal_type st)
6627 {
6628 	switch (st) {
6629 	case SIGNAL_TYPE_HDMI_TYPE_A:
6630 		return DRM_MODE_CONNECTOR_HDMIA;
6631 	case SIGNAL_TYPE_EDP:
6632 		return DRM_MODE_CONNECTOR_eDP;
6633 	case SIGNAL_TYPE_LVDS:
6634 		return DRM_MODE_CONNECTOR_LVDS;
6635 	case SIGNAL_TYPE_RGB:
6636 		return DRM_MODE_CONNECTOR_VGA;
6637 	case SIGNAL_TYPE_DISPLAY_PORT:
6638 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6639 		return DRM_MODE_CONNECTOR_DisplayPort;
6640 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6641 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6642 		return DRM_MODE_CONNECTOR_DVID;
6643 	case SIGNAL_TYPE_VIRTUAL:
6644 		return DRM_MODE_CONNECTOR_VIRTUAL;
6645 
6646 	default:
6647 		return DRM_MODE_CONNECTOR_Unknown;
6648 	}
6649 }
6650 
6651 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6652 {
6653 	struct drm_encoder *encoder;
6654 
6655 	/* There is only one encoder per connector */
6656 	drm_connector_for_each_possible_encoder(connector, encoder)
6657 		return encoder;
6658 
6659 	return NULL;
6660 }
6661 
6662 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6663 {
6664 	struct drm_encoder *encoder;
6665 	struct amdgpu_encoder *amdgpu_encoder;
6666 
6667 	encoder = amdgpu_dm_connector_to_encoder(connector);
6668 
6669 	if (encoder == NULL)
6670 		return;
6671 
6672 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6673 
6674 	amdgpu_encoder->native_mode.clock = 0;
6675 
6676 	if (!list_empty(&connector->probed_modes)) {
6677 		struct drm_display_mode *preferred_mode = NULL;
6678 
6679 		list_for_each_entry(preferred_mode,
6680 				    &connector->probed_modes,
6681 				    head) {
6682 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6683 				amdgpu_encoder->native_mode = *preferred_mode;
6684 
6685 			break;
6686 		}
6687 
6688 	}
6689 }
6690 
6691 static struct drm_display_mode *
6692 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6693 			     char *name,
6694 			     int hdisplay, int vdisplay)
6695 {
6696 	struct drm_device *dev = encoder->dev;
6697 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6698 	struct drm_display_mode *mode = NULL;
6699 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6700 
6701 	mode = drm_mode_duplicate(dev, native_mode);
6702 
6703 	if (mode == NULL)
6704 		return NULL;
6705 
6706 	mode->hdisplay = hdisplay;
6707 	mode->vdisplay = vdisplay;
6708 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6709 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6710 
6711 	return mode;
6712 
6713 }
6714 
6715 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6716 						 struct drm_connector *connector)
6717 {
6718 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6719 	struct drm_display_mode *mode = NULL;
6720 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6721 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6722 				to_amdgpu_dm_connector(connector);
6723 	int i;
6724 	int n;
6725 	struct mode_size {
6726 		char name[DRM_DISPLAY_MODE_LEN];
6727 		int w;
6728 		int h;
6729 	} common_modes[] = {
6730 		{  "640x480",  640,  480},
6731 		{  "800x600",  800,  600},
6732 		{ "1024x768", 1024,  768},
6733 		{ "1280x720", 1280,  720},
6734 		{ "1280x800", 1280,  800},
6735 		{"1280x1024", 1280, 1024},
6736 		{ "1440x900", 1440,  900},
6737 		{"1680x1050", 1680, 1050},
6738 		{"1600x1200", 1600, 1200},
6739 		{"1920x1080", 1920, 1080},
6740 		{"1920x1200", 1920, 1200}
6741 	};
6742 
6743 	n = ARRAY_SIZE(common_modes);
6744 
6745 	for (i = 0; i < n; i++) {
6746 		struct drm_display_mode *curmode = NULL;
6747 		bool mode_existed = false;
6748 
6749 		if (common_modes[i].w > native_mode->hdisplay ||
6750 		    common_modes[i].h > native_mode->vdisplay ||
6751 		   (common_modes[i].w == native_mode->hdisplay &&
6752 		    common_modes[i].h == native_mode->vdisplay))
6753 			continue;
6754 
6755 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6756 			if (common_modes[i].w == curmode->hdisplay &&
6757 			    common_modes[i].h == curmode->vdisplay) {
6758 				mode_existed = true;
6759 				break;
6760 			}
6761 		}
6762 
6763 		if (mode_existed)
6764 			continue;
6765 
6766 		mode = amdgpu_dm_create_common_mode(encoder,
6767 				common_modes[i].name, common_modes[i].w,
6768 				common_modes[i].h);
6769 		drm_mode_probed_add(connector, mode);
6770 		amdgpu_dm_connector->num_modes++;
6771 	}
6772 }
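
/*
 * Example: with a 1920x1080 native mode, the loop above adds every common
 * mode that fits strictly inside it (640x480 up to 1680x1050; 1600x1200
 * and 1920x1200 are skipped for being taller than 1080), skips 1920x1080
 * itself, and skips any size the EDID already contributed to probed_modes.
 */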
6773 
6774 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6775 					      struct edid *edid)
6776 {
6777 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6778 			to_amdgpu_dm_connector(connector);
6779 
6780 	if (edid) {
6781 		/* empty probed_modes */
6782 		INIT_LIST_HEAD(&connector->probed_modes);
6783 		amdgpu_dm_connector->num_modes =
6784 				drm_add_edid_modes(connector, edid);
6785 
6786 		/* Sort the probed modes before calling
6787 		 * amdgpu_dm_get_native_mode(), since an EDID can have
6788 		 * more than one preferred mode. Modes that appear
6789 		 * later in the probed mode list could be of a higher,
6790 		 * preferred resolution: for example, a 3840x2160
6791 		 * preferred timing in the base EDID and a 4096x2160
6792 		 * preferred resolution in a DID extension block later.
6793 		 */
6794 		drm_mode_sort(&connector->probed_modes);
6795 		amdgpu_dm_get_native_mode(connector);
6796 	} else {
6797 		amdgpu_dm_connector->num_modes = 0;
6798 	}
6799 }
6800 
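/*
 * Build the connector's mode list: EDID-derived modes plus the scaled
 * common modes when a valid EDID is present, or a bare fallback set of
 * modes up to 640x480 when the EDID is missing or invalid.
 */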
6801 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6802 {
6803 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6804 			to_amdgpu_dm_connector(connector);
6805 	struct drm_encoder *encoder;
6806 	struct edid *edid = amdgpu_dm_connector->edid;
6807 
6808 	encoder = amdgpu_dm_connector_to_encoder(connector);
6809 
6810 	if (!drm_edid_is_valid(edid)) {
6811 		amdgpu_dm_connector->num_modes =
6812 				drm_add_modes_noedid(connector, 640, 480);
6813 	} else {
6814 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6815 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6816 	}
6817 	amdgpu_dm_fbc_init(connector);
6818 
6819 	return amdgpu_dm_connector->num_modes;
6820 }
6821 
6822 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6823 				     struct amdgpu_dm_connector *aconnector,
6824 				     int connector_type,
6825 				     struct dc_link *link,
6826 				     int link_index)
6827 {
6828 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6829 
6830 	/*
6831 	 * Some of the properties below require access to state, like bpc.
6832 	 * Allocate some default initial connector state with our reset helper.
6833 	 */
6834 	if (aconnector->base.funcs->reset)
6835 		aconnector->base.funcs->reset(&aconnector->base);
6836 
6837 	aconnector->connector_id = link_index;
6838 	aconnector->dc_link = link;
6839 	aconnector->base.interlace_allowed = false;
6840 	aconnector->base.doublescan_allowed = false;
6841 	aconnector->base.stereo_allowed = false;
6842 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6843 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6844 	aconnector->audio_inst = -1;
6845 	mutex_init(&aconnector->hpd_lock);
6846 
6847 	/*
6848 	 * Configure HPD hot-plug support: connector->polled defaults to 0,
6849 	 * which means HPD hot plug is not supported.
6850 	 */
6851 	switch (connector_type) {
6852 	case DRM_MODE_CONNECTOR_HDMIA:
6853 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6854 		aconnector->base.ycbcr_420_allowed =
6855 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6856 		break;
6857 	case DRM_MODE_CONNECTOR_DisplayPort:
6858 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6859 		aconnector->base.ycbcr_420_allowed =
6860 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6861 		break;
6862 	case DRM_MODE_CONNECTOR_DVID:
6863 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6864 		break;
6865 	default:
6866 		break;
6867 	}
6868 
6869 	drm_object_attach_property(&aconnector->base.base,
6870 				dm->ddev->mode_config.scaling_mode_property,
6871 				DRM_MODE_SCALE_NONE);
6872 
6873 	drm_object_attach_property(&aconnector->base.base,
6874 				adev->mode_info.underscan_property,
6875 				UNDERSCAN_OFF);
6876 	drm_object_attach_property(&aconnector->base.base,
6877 				adev->mode_info.underscan_hborder_property,
6878 				0);
6879 	drm_object_attach_property(&aconnector->base.base,
6880 				adev->mode_info.underscan_vborder_property,
6881 				0);
6882 
6883 	if (!aconnector->mst_port)
6884 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6885 
6886 	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
6887 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6888 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6889 
6890 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6891 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6892 		drm_object_attach_property(&aconnector->base.base,
6893 				adev->mode_info.abm_level_property, 0);
6894 	}
6895 
6896 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6897 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6898 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6899 		drm_object_attach_property(
6900 			&aconnector->base.base,
6901 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6902 
6903 		if (!aconnector->mst_port)
6904 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6905 
6906 #ifdef CONFIG_DRM_AMD_DC_HDCP
6907 		if (adev->dm.hdcp_workqueue)
6908 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6909 #endif
6910 	}
6911 }
6912 
6913 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6914 			      struct i2c_msg *msgs, int num)
6915 {
6916 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6917 	struct ddc_service *ddc_service = i2c->ddc_service;
6918 	struct i2c_command cmd;
6919 	int i;
6920 	int result = -EIO;
6921 
6922 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6923 
6924 	if (!cmd.payloads)
6925 		return result;
6926 
6927 	cmd.number_of_payloads = num;
6928 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6929 	cmd.speed = 100;
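	/* Speed is in kHz as far as DC is concerned; 100 is standard-mode I2C. */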
6930 
6931 	for (i = 0; i < num; i++) {
6932 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6933 		cmd.payloads[i].address = msgs[i].addr;
6934 		cmd.payloads[i].length = msgs[i].len;
6935 		cmd.payloads[i].data = msgs[i].buf;
6936 	}
6937 
6938 	if (dc_submit_i2c(
6939 			ddc_service->ctx->dc,
6940 			ddc_service->ddc_pin->hw_info.ddc_channel,
6941 			&cmd))
6942 		result = num;
6943 
6944 	kfree(cmd.payloads);
6945 	return result;
6946 }
6947 
6948 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6949 {
6950 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6951 }
6952 
6953 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6954 	.master_xfer = amdgpu_dm_i2c_xfer,
6955 	.functionality = amdgpu_dm_i2c_func,
6956 };
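
/*
 * Illustrative sketch (not part of the driver): once registered through
 * i2c_add_adapter(), this algorithm services standard kernel I2C
 * transfers, e.g. reading the first EDID block over DDC at address 0x50.
 * The buffer and message layout below are hypothetical.
 *
 *	u8 offset = 0;
 *	u8 edid_buf[128];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD,
 *		  .len = sizeof(edid_buf), .buf = edid_buf },
 *	};
 *
 *	if (i2c_transfer(&i2c->base, msgs, ARRAY_SIZE(msgs)) == 2)
 *		... edid_buf now holds the first EDID block ...
 *
 * amdgpu_dm_i2c_xfer() translates each i2c_msg into an i2c_payload and
 * submits the whole command to DC through dc_submit_i2c().
 */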
6957 
6958 static struct amdgpu_i2c_adapter *
6959 create_i2c(struct ddc_service *ddc_service,
6960 	   int link_index,
6961 	   int *res)
6962 {
6963 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6964 	struct amdgpu_i2c_adapter *i2c;
6965 
6966 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6967 	if (!i2c)
6968 		return NULL;
6969 	i2c->base.owner = THIS_MODULE;
6970 	i2c->base.class = I2C_CLASS_DDC;
6971 	i2c->base.dev.parent = &adev->pdev->dev;
6972 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6973 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6974 	i2c_set_adapdata(&i2c->base, i2c);
6975 	i2c->ddc_service = ddc_service;
6976 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6977 
6978 	return i2c;
6979 }
6980 
6981 
6982 /*
6983  * Note: this function assumes that dc_link_detect() was called for the
6984  * dc_link which will be represented by this aconnector.
6985  */
6986 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6987 				    struct amdgpu_dm_connector *aconnector,
6988 				    uint32_t link_index,
6989 				    struct amdgpu_encoder *aencoder)
6990 {
6991 	int res = 0;
6992 	int connector_type;
6993 	struct dc *dc = dm->dc;
6994 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6995 	struct amdgpu_i2c_adapter *i2c;
6996 
6997 	link->priv = aconnector;
6998 
6999 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7000 
7001 	i2c = create_i2c(link->ddc, link->link_index, &res);
7002 	if (!i2c) {
7003 		DRM_ERROR("Failed to create i2c adapter data\n");
7004 		return -ENOMEM;
7005 	}
7006 
7007 	aconnector->i2c = i2c;
7008 	res = i2c_add_adapter(&i2c->base);
7009 
7010 	if (res) {
7011 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7012 		goto out_free;
7013 	}
7014 
7015 	connector_type = to_drm_connector_type(link->connector_signal);
7016 
7017 	res = drm_connector_init_with_ddc(
7018 			dm->ddev,
7019 			&aconnector->base,
7020 			&amdgpu_dm_connector_funcs,
7021 			connector_type,
7022 			&i2c->base);
7023 
7024 	if (res) {
7025 		DRM_ERROR("connector_init failed\n");
7026 		aconnector->connector_id = -1;
7027 		goto out_free;
7028 	}
7029 
7030 	drm_connector_helper_add(
7031 			&aconnector->base,
7032 			&amdgpu_dm_connector_helper_funcs);
7033 
7034 	amdgpu_dm_connector_init_helper(
7035 		dm,
7036 		aconnector,
7037 		connector_type,
7038 		link,
7039 		link_index);
7040 
7041 	drm_connector_attach_encoder(
7042 		&aconnector->base, &aencoder->base);
7043 
7044 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7045 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7046 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7047 
7048 out_free:
7049 	if (res) {
7050 		kfree(i2c);
7051 		aconnector->i2c = NULL;
7052 	}
7053 	return res;
7054 }
7055 
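/*
 * Every encoder in this driver may drive any CRTC, so return a mask with
 * one bit per CRTC: effectively (1 << num_crtc) - 1, capped at six CRTCs.
 */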
7056 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7057 {
7058 	switch (adev->mode_info.num_crtc) {
7059 	case 1:
7060 		return 0x1;
7061 	case 2:
7062 		return 0x3;
7063 	case 3:
7064 		return 0x7;
7065 	case 4:
7066 		return 0xf;
7067 	case 5:
7068 		return 0x1f;
7069 	case 6:
7070 	default:
7071 		return 0x3f;
7072 	}
7073 }
7074 
7075 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7076 				  struct amdgpu_encoder *aencoder,
7077 				  uint32_t link_index)
7078 {
7079 	struct amdgpu_device *adev = drm_to_adev(dev);
7080 
7081 	int res = drm_encoder_init(dev,
7082 				   &aencoder->base,
7083 				   &amdgpu_dm_encoder_funcs,
7084 				   DRM_MODE_ENCODER_TMDS,
7085 				   NULL);
7086 
7087 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7088 
7089 	if (!res)
7090 		aencoder->encoder_id = link_index;
7091 	else
7092 		aencoder->encoder_id = -1;
7093 
7094 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7095 
7096 	return res;
7097 }
7098 
7099 static void manage_dm_interrupts(struct amdgpu_device *adev,
7100 				 struct amdgpu_crtc *acrtc,
7101 				 bool enable)
7102 {
7103 	/*
7104 	 * We have no guarantee that the frontend index maps to the same
7105 	 * backend index - some even map to more than one.
7106 	 *
7107 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7108 	 */
7109 	int irq_type =
7110 		amdgpu_display_crtc_idx_to_irq_type(
7111 			adev,
7112 			acrtc->crtc_id);
7113 
7114 	if (enable) {
7115 		drm_crtc_vblank_on(&acrtc->base);
7116 		amdgpu_irq_get(
7117 			adev,
7118 			&adev->pageflip_irq,
7119 			irq_type);
7120 	} else {
7122 		amdgpu_irq_put(
7123 			adev,
7124 			&adev->pageflip_irq,
7125 			irq_type);
7126 		drm_crtc_vblank_off(&acrtc->base);
7127 	}
7128 }
7129 
7130 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7131 				      struct amdgpu_crtc *acrtc)
7132 {
7133 	int irq_type =
7134 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7135 
7136 	/*
7137 	 * Read the current state of the IRQ and forcibly reapply
7138 	 * the setting to hardware.
7139 	 */
7140 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7141 }
7142 
7143 static bool
7144 is_scaling_state_different(const struct dm_connector_state *dm_state,
7145 			   const struct dm_connector_state *old_dm_state)
7146 {
7147 	if (dm_state->scaling != old_dm_state->scaling)
7148 		return true;
7149 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7150 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7151 			return true;
7152 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7153 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7154 			return true;
7155 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7156 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7157 		return true;
7158 	return false;
7159 }
7160 
7161 #ifdef CONFIG_DRM_AMD_DC_HDCP
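/*
 * Decide whether the HDCP state machine needs to run for this connector
 * by comparing the old and new content protection state, normalizing
 * transient transitions (e.g. the ENABLED -> DESIRED re-enable and the
 * UNDESIRED -> ENABLED S3 restore) along the way. Returns true when the
 * caller should invoke hdcp_update_display().
 */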
7162 static bool is_content_protection_different(struct drm_connector_state *state,
7163 					    const struct drm_connector_state *old_state,
7164 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7165 {
7166 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7167 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7168 
7169 	/* Handle: Type0/1 change */
7170 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7171 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7172 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7173 		return true;
7174 	}
7175 
7176 	/* Content protection is being re-enabled; ignore this.
7177 	 *
7178 	 * Handles:	ENABLED -> DESIRED
7179 	 */
7180 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7181 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7182 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7183 		return false;
7184 	}
7185 
7186 	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7187 	 *
7188 	 * Handles:	UNDESIRED -> ENABLED
7189 	 */
7190 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7191 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7192 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7193 
7194 	/* Check that something is connected and enabled; otherwise we would start
7195 	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
7196 	 *
7197 	 * Handles:	DESIRED -> DESIRED (Special case)
7198 	 */
7199 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7200 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7201 		dm_con_state->update_hdcp = false;
7202 		return true;
7203 	}
7204 
7205 	/*
7206 	 * Handles:	UNDESIRED -> UNDESIRED
7207 	 *		DESIRED -> DESIRED
7208 	 *		ENABLED -> ENABLED
7209 	 */
7210 	if (old_state->content_protection == state->content_protection)
7211 		return false;
7212 
7213 	/*
7214 	 * Handles:	UNDESIRED -> DESIRED
7215 	 *		DESIRED -> UNDESIRED
7216 	 *		ENABLED -> UNDESIRED
7217 	 */
7218 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7219 		return true;
7220 
7221 	/*
7222 	 * Handles:	DESIRED -> ENABLED
7223 	 */
7224 	return false;
7225 }
7226 
7227 #endif
7228 static void remove_stream(struct amdgpu_device *adev,
7229 			  struct amdgpu_crtc *acrtc,
7230 			  struct dc_stream_state *stream)
7231 {
7232 	/* this is the update mode case */
7233 
7234 	acrtc->otg_inst = -1;
7235 	acrtc->enabled = false;
7236 }
7237 
7238 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7239 			       struct dc_cursor_position *position)
7240 {
7241 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7242 	int x, y;
7243 	int xorigin = 0, yorigin = 0;
7244 
7245 	position->enable = false;
7246 	position->x = 0;
7247 	position->y = 0;
7248 
7249 	if (!crtc || !plane->state->fb)
7250 		return 0;
7251 
7252 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7253 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7254 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7255 			  __func__,
7256 			  plane->state->crtc_w,
7257 			  plane->state->crtc_h);
7258 		return -EINVAL;
7259 	}
7260 
7261 	x = plane->state->crtc_x;
7262 	y = plane->state->crtc_y;
7263 
7264 	if (x <= -amdgpu_crtc->max_cursor_width ||
7265 	    y <= -amdgpu_crtc->max_cursor_height)
7266 		return 0;
7267 
7268 	if (x < 0) {
7269 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7270 		x = 0;
7271 	}
7272 	if (y < 0) {
7273 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7274 		y = 0;
7275 	}
7276 	position->enable = true;
7277 	position->translate_by_source = true;
7278 	position->x = x;
7279 	position->y = y;
7280 	position->x_hotspot = xorigin;
7281 	position->y_hotspot = yorigin;
7282 
7283 	return 0;
7284 }
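
/*
 * Worked example for get_cursor_position(): with a 64x64 maximum cursor
 * and plane->state->crtc_x == -10, crtc_y == 20, the cursor is clipped
 * on the left edge; xorigin becomes 10, x is clamped to 0, and DC is
 * asked to display the cursor at (0, 20) with hotspot (10, 0) translated
 * by source.
 */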
7285 
7286 static void handle_cursor_update(struct drm_plane *plane,
7287 				 struct drm_plane_state *old_plane_state)
7288 {
7289 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7290 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7291 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7292 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7293 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7294 	uint64_t address = afb ? afb->address : 0;
7295 	struct dc_cursor_position position;
7296 	struct dc_cursor_attributes attributes;
7297 	int ret;
7298 
7299 	if (!plane->state->fb && !old_plane_state->fb)
7300 		return;
7301 
7302 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %dx%d\n",
7303 			 __func__,
7304 			 amdgpu_crtc->crtc_id,
7305 			 plane->state->crtc_w,
7306 			 plane->state->crtc_h);
7307 
7308 	ret = get_cursor_position(plane, crtc, &position);
7309 	if (ret)
7310 		return;
7311 
7312 	if (!position.enable) {
7313 		/* turn off cursor */
7314 		if (crtc_state && crtc_state->stream) {
7315 			mutex_lock(&adev->dm.dc_lock);
7316 			dc_stream_set_cursor_position(crtc_state->stream,
7317 						      &position);
7318 			mutex_unlock(&adev->dm.dc_lock);
7319 		}
7320 		return;
7321 	}
7322 
7323 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7324 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7325 
7326 	memset(&attributes, 0, sizeof(attributes));
7327 	attributes.address.high_part = upper_32_bits(address);
7328 	attributes.address.low_part  = lower_32_bits(address);
7329 	attributes.width             = plane->state->crtc_w;
7330 	attributes.height            = plane->state->crtc_h;
7331 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7332 	attributes.rotation_angle    = 0;
7333 	attributes.attribute_flags.value = 0;
7334 
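	/* Pitch in pixels: the byte pitch divided by the bytes per pixel. */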
7335 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7336 
7337 	if (crtc_state->stream) {
7338 		mutex_lock(&adev->dm.dc_lock);
7339 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7340 							 &attributes))
7341 			DRM_ERROR("DC failed to set cursor attributes\n");
7342 
7343 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7344 						   &position))
7345 			DRM_ERROR("DC failed to set cursor position\n");
7346 		mutex_unlock(&adev->dm.dc_lock);
7347 	}
7348 }
7349 
7350 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7351 {
7352 
7354 	WARN_ON(acrtc->event);
7355 
7356 	acrtc->event = acrtc->base.state->event;
7357 
7358 	/* Set the flip status */
7359 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7360 
7361 	/* Mark this event as consumed */
7362 	acrtc->base.state->event = NULL;
7363 
7364 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7365 						 acrtc->crtc_id);
7366 }
7367 
7368 static void update_freesync_state_on_stream(
7369 	struct amdgpu_display_manager *dm,
7370 	struct dm_crtc_state *new_crtc_state,
7371 	struct dc_stream_state *new_stream,
7372 	struct dc_plane_state *surface,
7373 	u32 flip_timestamp_in_us)
7374 {
7375 	struct mod_vrr_params vrr_params;
7376 	struct dc_info_packet vrr_infopacket = {0};
7377 	struct amdgpu_device *adev = dm->adev;
7378 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7379 	unsigned long flags;
7380 
7381 	if (!new_stream)
7382 		return;
7383 
7384 	/*
7385 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7386 	 * For now it's sufficient to just guard against these conditions.
7387 	 */
7388 
7389 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7390 		return;
7391 
7392 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7393 	vrr_params = acrtc->dm_irq_params.vrr_params;
7394 
7395 	if (surface) {
7396 		mod_freesync_handle_preflip(
7397 			dm->freesync_module,
7398 			surface,
7399 			new_stream,
7400 			flip_timestamp_in_us,
7401 			&vrr_params);
7402 
7403 		if (adev->family < AMDGPU_FAMILY_AI &&
7404 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7405 			mod_freesync_handle_v_update(dm->freesync_module,
7406 						     new_stream, &vrr_params);
7407 
7408 			/* Need to call this before the frame ends. */
7409 			dc_stream_adjust_vmin_vmax(dm->dc,
7410 						   new_crtc_state->stream,
7411 						   &vrr_params.adjust);
7412 		}
7413 	}
7414 
7415 	mod_freesync_build_vrr_infopacket(
7416 		dm->freesync_module,
7417 		new_stream,
7418 		&vrr_params,
7419 		PACKET_TYPE_VRR,
7420 		TRANSFER_FUNC_UNKNOWN,
7421 		&vrr_infopacket);
7422 
7423 	new_crtc_state->freesync_timing_changed |=
7424 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7425 			&vrr_params.adjust,
7426 			sizeof(vrr_params.adjust)) != 0);
7427 
7428 	new_crtc_state->freesync_vrr_info_changed |=
7429 		(memcmp(&new_crtc_state->vrr_infopacket,
7430 			&vrr_infopacket,
7431 			sizeof(vrr_infopacket)) != 0);
7432 
7433 	acrtc->dm_irq_params.vrr_params = vrr_params;
7434 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7435 
7436 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7437 	new_stream->vrr_infopacket = vrr_infopacket;
7438 
7439 	if (new_crtc_state->freesync_vrr_info_changed)
7440 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
7441 			      new_crtc_state->base.crtc->base.id,
7442 			      (int)new_crtc_state->base.vrr_enabled,
7443 			      (int)vrr_params.state);
7444 
7445 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7446 }
7447 
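/*
 * Mirror the per-CRTC VRR configuration into acrtc->dm_irq_params under
 * the event_lock so the vblank/vupdate interrupt handlers see a
 * consistent snapshot without taking atomic state locks.
 */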
7448 static void update_stream_irq_parameters(
7449 	struct amdgpu_display_manager *dm,
7450 	struct dm_crtc_state *new_crtc_state)
7451 {
7452 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7453 	struct mod_vrr_params vrr_params;
7454 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7455 	struct amdgpu_device *adev = dm->adev;
7456 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7457 	unsigned long flags;
7458 
7459 	if (!new_stream)
7460 		return;
7461 
7462 	/*
7463 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7464 	 * For now it's sufficient to just guard against these conditions.
7465 	 */
7466 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7467 		return;
7468 
7469 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7470 	vrr_params = acrtc->dm_irq_params.vrr_params;
7471 
7472 	if (new_crtc_state->vrr_supported &&
7473 	    config.min_refresh_in_uhz &&
7474 	    config.max_refresh_in_uhz) {
7475 		config.state = new_crtc_state->base.vrr_enabled ?
7476 			VRR_STATE_ACTIVE_VARIABLE :
7477 			VRR_STATE_INACTIVE;
7478 	} else {
7479 		config.state = VRR_STATE_UNSUPPORTED;
7480 	}
7481 
7482 	mod_freesync_build_vrr_params(dm->freesync_module,
7483 				      new_stream,
7484 				      &config, &vrr_params);
7485 
7486 	new_crtc_state->freesync_timing_changed |=
7487 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7488 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7489 
7490 	new_crtc_state->freesync_config = config;
7491 	/* Copy state for access from DM IRQ handler */
7492 	acrtc->dm_irq_params.freesync_config = config;
7493 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7494 	acrtc->dm_irq_params.vrr_params = vrr_params;
7495 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7496 }
7497 
7498 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7499 					    struct dm_crtc_state *new_state)
7500 {
7501 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7502 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7503 
7504 	if (!old_vrr_active && new_vrr_active) {
7505 		/* Transition VRR inactive -> active:
7506 		 * While VRR is active, we must not disable the vblank irq, as a
7507 		 * re-enable after a disable would compute bogus vblank/pflip
7508 		 * timestamps if it happened inside the display front porch.
7509 		 *
7510 		 * We also need vupdate irq for the actual core vblank handling
7511 		 * at end of vblank.
7512 		 */
7513 		dm_set_vupdate_irq(new_state->base.crtc, true);
7514 		drm_crtc_vblank_get(new_state->base.crtc);
7515 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7516 				 __func__, new_state->base.crtc->base.id);
7517 	} else if (old_vrr_active && !new_vrr_active) {
7518 		/* Transition VRR active -> inactive:
7519 		 * Allow vblank irq disable again for fixed refresh rate.
7520 		 */
7521 		dm_set_vupdate_irq(new_state->base.crtc, false);
7522 		drm_crtc_vblank_put(new_state->base.crtc);
7523 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7524 				 __func__, new_state->base.crtc->base.id);
7525 	}
7526 }
7527 
7528 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7529 {
7530 	struct drm_plane *plane;
7531 	struct drm_plane_state *old_plane_state, *new_plane_state;
7532 	int i;
7533 
7534 	/*
7535 	 * TODO: Make this per-stream so we don't issue redundant updates for
7536 	 * commits with multiple streams.
7537 	 */
7538 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7539 				       new_plane_state, i)
7540 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7541 			handle_cursor_update(plane, old_plane_state);
7542 }
7543 
7544 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7545 				    struct dc_state *dc_state,
7546 				    struct drm_device *dev,
7547 				    struct amdgpu_display_manager *dm,
7548 				    struct drm_crtc *pcrtc,
7549 				    bool wait_for_vblank)
7550 {
7551 	int i;
7552 	uint64_t timestamp_ns;
7553 	struct drm_plane *plane;
7554 	struct drm_plane_state *old_plane_state, *new_plane_state;
7555 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7556 	struct drm_crtc_state *new_pcrtc_state =
7557 			drm_atomic_get_new_crtc_state(state, pcrtc);
7558 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7559 	struct dm_crtc_state *dm_old_crtc_state =
7560 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7561 	int planes_count = 0, vpos, hpos;
7562 	long r;
7563 	unsigned long flags;
7564 	struct amdgpu_bo *abo;
7565 	uint32_t target_vblank, last_flip_vblank;
7566 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7567 	bool pflip_present = false;
7568 	struct {
7569 		struct dc_surface_update surface_updates[MAX_SURFACES];
7570 		struct dc_plane_info plane_infos[MAX_SURFACES];
7571 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7572 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7573 		struct dc_stream_update stream_update;
7574 	} *bundle;
7575 
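	/*
	 * The update bundle carries per-plane arrays sized by MAX_SURFACES and
	 * is too large to live on the kernel stack, hence the heap allocation.
	 */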
7576 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7577 
7578 	if (!bundle) {
7579 		dm_error("Failed to allocate update bundle\n");
7580 		goto cleanup;
7581 	}
7582 
7583 	/*
7584 	 * Disable the cursor first if we're disabling all the planes.
7585 	 * It'll remain on the screen after the planes are re-enabled
7586 	 * if we don't.
7587 	 */
7588 	if (acrtc_state->active_planes == 0)
7589 		amdgpu_dm_commit_cursors(state);
7590 
7591 	/* update planes when needed */
7592 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
7593 		struct drm_crtc *crtc = new_plane_state->crtc;
7594 		struct drm_crtc_state *new_crtc_state;
7595 		struct drm_framebuffer *fb = new_plane_state->fb;
7596 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7597 		bool plane_needs_flip;
7598 		struct dc_plane_state *dc_plane;
7599 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7600 
7601 		/* Cursor plane is handled after stream updates */
7602 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7603 			continue;
7604 
7605 		if (!fb || !crtc || pcrtc != crtc)
7606 			continue;
7607 
7608 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7609 		if (!new_crtc_state->active)
7610 			continue;
7611 
7612 		dc_plane = dm_new_plane_state->dc_state;
7613 
7614 		bundle->surface_updates[planes_count].surface = dc_plane;
7615 		if (new_pcrtc_state->color_mgmt_changed) {
7616 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7617 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7618 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7619 		}
7620 
7621 		fill_dc_scaling_info(new_plane_state,
7622 				     &bundle->scaling_infos[planes_count]);
7623 
7624 		bundle->surface_updates[planes_count].scaling_info =
7625 			&bundle->scaling_infos[planes_count];
7626 
7627 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7628 
7629 		pflip_present = pflip_present || plane_needs_flip;
7630 
7631 		if (!plane_needs_flip) {
7632 			planes_count += 1;
7633 			continue;
7634 		}
7635 
7636 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7637 
7638 		/*
7639 		 * Wait for all fences on this FB. Use a limited wait to avoid
7640 		 * deadlock during GPU reset, when this fence may never signal
7641 		 * while we hold the reservation lock for the BO.
7642 		 */
7643 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7644 							false,
7645 							msecs_to_jiffies(5000));
7646 		if (unlikely(r <= 0))
7647 			DRM_ERROR("Waiting for fences timed out!\n");
7648 
7649 		fill_dc_plane_info_and_addr(
7650 			dm->adev, new_plane_state,
7651 			afb->tiling_flags,
7652 			&bundle->plane_infos[planes_count],
7653 			&bundle->flip_addrs[planes_count].address,
7654 			afb->tmz_surface, false);
7655 
7656 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7657 				 new_plane_state->plane->index,
7658 				 bundle->plane_infos[planes_count].dcc.enable);
7659 
7660 		bundle->surface_updates[planes_count].plane_info =
7661 			&bundle->plane_infos[planes_count];
7662 
7663 		/*
7664 		 * Only allow immediate flips for fast updates that don't
7665 		 * change FB pitch, DCC state, rotation or mirroring.
7666 		 */
7667 		bundle->flip_addrs[planes_count].flip_immediate =
7668 			crtc->state->async_flip &&
7669 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7670 
7671 		timestamp_ns = ktime_get_ns();
7672 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7673 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7674 		bundle->surface_updates[planes_count].surface = dc_plane;
7675 
7676 		if (!bundle->surface_updates[planes_count].surface) {
7677 			DRM_ERROR("No surface for CRTC: id=%d\n",
7678 					acrtc_attach->crtc_id);
7679 			continue;
7680 		}
7681 
7682 		if (plane == pcrtc->primary)
7683 			update_freesync_state_on_stream(
7684 				dm,
7685 				acrtc_state,
7686 				acrtc_state->stream,
7687 				dc_plane,
7688 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7689 
7690 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7691 				 __func__,
7692 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7693 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7694 
7695 		planes_count += 1;
7696 
7697 	}
7698 
7699 	if (pflip_present) {
7700 		if (!vrr_active) {
7701 			/* Use old throttling in non-vrr fixed refresh rate mode
7702 			 * to keep flip scheduling based on target vblank counts
7703 			 * working in a backwards compatible way, e.g., for
7704 			 * clients using the GLX_OML_sync_control extension or
7705 			 * DRI3/Present extension with defined target_msc.
7706 			 */
7707 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7708 		} else {
7710 			/* For variable refresh rate mode only:
7711 			 * Get vblank of last completed flip to avoid > 1 vrr
7712 			 * flips per video frame by use of throttling, but allow
7713 			 * flip programming anywhere in the possibly large
7714 			 * variable vrr vblank interval for fine-grained flip
7715 			 * timing control and more opportunity to avoid stutter
7716 			 * on late submission of flips.
7717 			 */
7718 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7719 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7720 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7721 		}
7722 
7723 		target_vblank = last_flip_vblank + wait_for_vblank;
7724 
7725 		/*
7726 		 * Wait until we're out of the vertical blank period before the
7727 		 * one targeted by the flip. The signed (int)(target_vblank - counter)
7728 		 * test below stays correct across vblank counter wraparound.
7728 		 */
7729 		while ((acrtc_attach->enabled &&
7730 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7731 							    0, &vpos, &hpos, NULL,
7732 							    NULL, &pcrtc->hwmode)
7733 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7734 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7735 			(int)(target_vblank -
7736 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7737 			usleep_range(1000, 1100);
7738 		}
7739 
7740 		/*
7741 		 * Prepare the flip event for the pageflip interrupt to handle.
7742 		 *
7743 		 * This only works in the case where we've already turned on the
7744 		 * appropriate hardware blocks (e.g. HUBP), so in the transition
7745 		 * case from 0 -> n planes we have to skip a hardware generated
7746 		 * event and rely on sending it from software.
7747 		 */
7748 		if (acrtc_attach->base.state->event &&
7749 		    acrtc_state->active_planes > 0) {
7750 			drm_crtc_vblank_get(pcrtc);
7751 
7752 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7753 
7754 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7755 			prepare_flip_isr(acrtc_attach);
7756 
7757 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7758 		}
7759 
7760 		if (acrtc_state->stream) {
7761 			if (acrtc_state->freesync_vrr_info_changed)
7762 				bundle->stream_update.vrr_infopacket =
7763 					&acrtc_state->stream->vrr_infopacket;
7764 		}
7765 	}
7766 
7767 	/* Update the planes if changed or disable if we don't have any. */
7768 	if ((planes_count || acrtc_state->active_planes == 0) &&
7769 		acrtc_state->stream) {
7770 		bundle->stream_update.stream = acrtc_state->stream;
7771 		if (new_pcrtc_state->mode_changed) {
7772 			bundle->stream_update.src = acrtc_state->stream->src;
7773 			bundle->stream_update.dst = acrtc_state->stream->dst;
7774 		}
7775 
7776 		if (new_pcrtc_state->color_mgmt_changed) {
7777 			/*
7778 			 * TODO: This isn't fully correct since we've actually
7779 			 * already modified the stream in place.
7780 			 */
7781 			bundle->stream_update.gamut_remap =
7782 				&acrtc_state->stream->gamut_remap_matrix;
7783 			bundle->stream_update.output_csc_transform =
7784 				&acrtc_state->stream->csc_color_matrix;
7785 			bundle->stream_update.out_transfer_func =
7786 				acrtc_state->stream->out_transfer_func;
7787 		}
7788 
7789 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7790 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7791 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7792 
7793 		/*
7794 		 * If FreeSync state on the stream has changed then we need to
7795 		 * re-adjust the min/max bounds now that DC doesn't handle this
7796 		 * as part of commit.
7797 		 */
7798 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7799 		    amdgpu_dm_vrr_active(acrtc_state)) {
7800 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7801 			dc_stream_adjust_vmin_vmax(
7802 				dm->dc, acrtc_state->stream,
7803 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7804 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7805 		}
7806 		mutex_lock(&dm->dc_lock);
7807 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7808 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7809 			amdgpu_dm_psr_disable(acrtc_state->stream);
7810 
7811 		dc_commit_updates_for_stream(dm->dc,
7812 						     bundle->surface_updates,
7813 						     planes_count,
7814 						     acrtc_state->stream,
7815 						     &bundle->stream_update);
7816 
7817 		/*
7818 		 * Enable or disable the interrupts on the backend.
7819 		 *
7820 		 * Most pipes are put into power gating when unused.
7821 		 *
7822 		 * When power gating is enabled on a pipe we lose the
7823 		 * interrupt enablement state when power gating is disabled.
7824 		 *
7825 		 * So we need to update the IRQ control state in hardware
7826 		 * whenever the pipe turns on (since it could be previously
7827 		 * power gated) or off (since some pipes can't be power gated
7828 		 * on some ASICs).
7829 		 */
7830 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7831 			dm_update_pflip_irq_state(drm_to_adev(dev),
7832 						  acrtc_attach);
7833 
7834 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7835 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7836 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7837 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7838 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7839 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7840 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7841 			amdgpu_dm_psr_enable(acrtc_state->stream);
7842 		}
7843 
7844 		mutex_unlock(&dm->dc_lock);
7845 	}
7846 
7847 	/*
7848 	 * Update cursor state *after* programming all the planes.
7849 	 * This avoids redundant programming in the case where we're going
7850 	 * to disable a single plane, since those pipes are being disabled anyway.
7851 	 */
7852 	if (acrtc_state->active_planes)
7853 		amdgpu_dm_commit_cursors(state);
7854 
7855 cleanup:
7856 	kfree(bundle);
7857 }
7858 
7859 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7860 				   struct drm_atomic_state *state)
7861 {
7862 	struct amdgpu_device *adev = drm_to_adev(dev);
7863 	struct amdgpu_dm_connector *aconnector;
7864 	struct drm_connector *connector;
7865 	struct drm_connector_state *old_con_state, *new_con_state;
7866 	struct drm_crtc_state *new_crtc_state;
7867 	struct dm_crtc_state *new_dm_crtc_state;
7868 	const struct dc_stream_status *status;
7869 	int i, inst;
7870 
7871 	/* Notify device removals. */
7872 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7873 		if (old_con_state->crtc != new_con_state->crtc) {
7874 			/* CRTC changes require notification. */
7875 			goto notify;
7876 		}
7877 
7878 		if (!new_con_state->crtc)
7879 			continue;
7880 
7881 		new_crtc_state = drm_atomic_get_new_crtc_state(
7882 			state, new_con_state->crtc);
7883 
7884 		if (!new_crtc_state)
7885 			continue;
7886 
7887 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7888 			continue;
7889 
7890 	notify:
7891 		aconnector = to_amdgpu_dm_connector(connector);
7892 
7893 		mutex_lock(&adev->dm.audio_lock);
7894 		inst = aconnector->audio_inst;
7895 		aconnector->audio_inst = -1;
7896 		mutex_unlock(&adev->dm.audio_lock);
7897 
7898 		amdgpu_dm_audio_eld_notify(adev, inst);
7899 	}
7900 
7901 	/* Notify audio device additions. */
7902 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7903 		if (!new_con_state->crtc)
7904 			continue;
7905 
7906 		new_crtc_state = drm_atomic_get_new_crtc_state(
7907 			state, new_con_state->crtc);
7908 
7909 		if (!new_crtc_state)
7910 			continue;
7911 
7912 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7913 			continue;
7914 
7915 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7916 		if (!new_dm_crtc_state->stream)
7917 			continue;
7918 
7919 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7920 		if (!status)
7921 			continue;
7922 
7923 		aconnector = to_amdgpu_dm_connector(connector);
7924 
7925 		mutex_lock(&adev->dm.audio_lock);
7926 		inst = status->audio_inst;
7927 		aconnector->audio_inst = inst;
7928 		mutex_unlock(&adev->dm.audio_lock);
7929 
7930 		amdgpu_dm_audio_eld_notify(adev, inst);
7931 	}
7932 }
7933 
7934 /**
7935  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7936  * @crtc_state: the DRM CRTC state
7937  * @stream_state: the DC stream state.
7938  *
7939  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7940  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7941  */
7942 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7943 						struct dc_stream_state *stream_state)
7944 {
7945 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7946 }
7947 
7948 /**
7949  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7950  * @state: The atomic state to commit
7951  *
7952  * This will tell DC to commit the constructed DC state from atomic_check,
7953  * programming the hardware. Any failure here implies a hardware failure, since
7954  * atomic check should have filtered anything non-kosher.
7955  */
7956 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7957 {
7958 	struct drm_device *dev = state->dev;
7959 	struct amdgpu_device *adev = drm_to_adev(dev);
7960 	struct amdgpu_display_manager *dm = &adev->dm;
7961 	struct dm_atomic_state *dm_state;
7962 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7963 	uint32_t i, j;
7964 	struct drm_crtc *crtc;
7965 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7966 	unsigned long flags;
7967 	bool wait_for_vblank = true;
7968 	struct drm_connector *connector;
7969 	struct drm_connector_state *old_con_state, *new_con_state;
7970 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7971 	int crtc_disable_count = 0;
7972 	bool mode_set_reset_required = false;
7973 
7974 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
7975 
7976 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7977 
7978 	dm_state = dm_atomic_get_new_state(state);
7979 	if (dm_state && dm_state->context) {
7980 		dc_state = dm_state->context;
7981 	} else {
7982 		/* No state changes, retain current state. */
7983 		dc_state_temp = dc_create_state(dm->dc);
7984 		ASSERT(dc_state_temp);
7985 		dc_state = dc_state_temp;
7986 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7987 	}
7988 
7989 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7990 				       new_crtc_state, i) {
7991 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7992 
7993 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7994 
7995 		if (old_crtc_state->active &&
7996 		    (!new_crtc_state->active ||
7997 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7998 			manage_dm_interrupts(adev, acrtc, false);
7999 			dc_stream_release(dm_old_crtc_state->stream);
8000 		}
8001 	}
8002 
8003 	drm_atomic_helper_calc_timestamping_constants(state);
8004 
8005 	/* update changed items */
8006 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8007 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8008 
8009 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8010 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8011 
8012 		DRM_DEBUG_DRIVER(
8013 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8014 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
8015 			"connectors_changed:%d\n",
8016 			acrtc->crtc_id,
8017 			new_crtc_state->enable,
8018 			new_crtc_state->active,
8019 			new_crtc_state->planes_changed,
8020 			new_crtc_state->mode_changed,
8021 			new_crtc_state->active_changed,
8022 			new_crtc_state->connectors_changed);
8023 
8024 		/* Disable cursor if disabling crtc */
8025 		if (old_crtc_state->active && !new_crtc_state->active) {
8026 			struct dc_cursor_position position;
8027 
8028 			memset(&position, 0, sizeof(position));
8029 			mutex_lock(&dm->dc_lock);
8030 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8031 			mutex_unlock(&dm->dc_lock);
8032 		}
8033 
8034 		/* Copy all transient state flags into dc state */
8035 		if (dm_new_crtc_state->stream) {
8036 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8037 							    dm_new_crtc_state->stream);
8038 		}
8039 
8040 		/* Handle the headless hotplug case, updating new_state and
8041 		 * aconnector as needed
8042 		 */
8043 
8044 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8045 
8046 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8047 
8048 			if (!dm_new_crtc_state->stream) {
8049 				/*
8050 				 * This could happen because of issues with
8051 				 * the delivery of userspace notifications.
8052 				 * In that case userspace tries to set a mode
8053 				 * on a display that is in fact disconnected,
8054 				 * and dc_sink is NULL on the aconnector.
8055 				 * We expect a mode reset to come soon.
8056 				 *
8057 				 * This can also happen when an unplug occurs
8058 				 * during the resume sequence.
8059 				 *
8060 				 * In this case, we want to pretend we still
8061 				 * have a sink to keep the pipe running so that
8062 				 * hw state is consistent with the sw state
8063 				 */
8064 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8065 						__func__, acrtc->base.base.id);
8066 				continue;
8067 			}
8068 
8069 			if (dm_old_crtc_state->stream)
8070 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8071 
8072 			pm_runtime_get_noresume(dev->dev);
8073 
8074 			acrtc->enabled = true;
8075 			acrtc->hw_mode = new_crtc_state->mode;
8076 			crtc->hwmode = new_crtc_state->mode;
8077 			mode_set_reset_required = true;
8078 		} else if (modereset_required(new_crtc_state)) {
8079 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8080 			/* i.e. reset mode */
8081 			if (dm_old_crtc_state->stream)
8082 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8083 			mode_set_reset_required = true;
8084 		}
8085 	} /* for_each_crtc_in_state() */
8086 
8087 	if (dc_state) {
8088 		/* If there was a mode set or reset, disable eDP PSR. */
8089 		if (mode_set_reset_required)
8090 			amdgpu_dm_psr_disable_all(dm);
8091 
8092 		dm_enable_per_frame_crtc_master_sync(dc_state);
8093 		mutex_lock(&dm->dc_lock);
8094 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8095 		mutex_unlock(&dm->dc_lock);
8096 	}
8097 
8098 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8099 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8100 
8101 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8102 
8103 		if (dm_new_crtc_state->stream != NULL) {
8104 			const struct dc_stream_status *status =
8105 					dc_stream_get_status(dm_new_crtc_state->stream);
8106 
8107 			if (!status)
8108 				status = dc_stream_get_status_from_state(dc_state,
8109 									 dm_new_crtc_state->stream);
8110 			if (!status)
8111 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8112 			else
8113 				acrtc->otg_inst = status->primary_otg_inst;
8114 		}
8115 	}
8116 #ifdef CONFIG_DRM_AMD_DC_HDCP
8117 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8118 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8119 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8120 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8121 
8122 		new_crtc_state = NULL;
8123 
8124 		if (acrtc)
8125 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8126 
8127 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8128 
8129 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8130 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8131 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8132 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8133 			dm_new_con_state->update_hdcp = true;
8134 			continue;
8135 		}
8136 
8137 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8138 			hdcp_update_display(
8139 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8140 				new_con_state->hdcp_content_type,
8141 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8142 													 : false);
8143 	}
8144 #endif
8145 
8146 	/* Handle connector state changes */
8147 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8148 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8149 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8150 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8151 		struct dc_surface_update surface_updates[MAX_SURFACES];
8152 		struct dc_stream_update stream_update;
8153 		struct dc_info_packet hdr_packet;
8154 		struct dc_stream_status *status = NULL;
8155 		bool abm_changed, hdr_changed, scaling_changed;
8156 
8157 		memset(&surface_updates, 0, sizeof(surface_updates));
8158 		memset(&stream_update, 0, sizeof(stream_update));
8159 
8160 		if (acrtc) {
8161 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8162 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8163 		}
8164 
8165 		/* Skip any modesets/resets */
8166 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8167 			continue;
8168 
8169 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8170 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8171 
8172 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8173 							     dm_old_con_state);
8174 
8175 		abm_changed = dm_new_crtc_state->abm_level !=
8176 			      dm_old_crtc_state->abm_level;
8177 
8178 		hdr_changed =
8179 			is_hdr_metadata_different(old_con_state, new_con_state);
8180 
8181 		if (!scaling_changed && !abm_changed && !hdr_changed)
8182 			continue;
8183 
8184 		stream_update.stream = dm_new_crtc_state->stream;
8185 		if (scaling_changed) {
8186 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8187 					dm_new_con_state, dm_new_crtc_state->stream);
8188 
8189 			stream_update.src = dm_new_crtc_state->stream->src;
8190 			stream_update.dst = dm_new_crtc_state->stream->dst;
8191 		}
8192 
8193 		if (abm_changed) {
8194 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8195 
8196 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8197 		}
8198 
8199 		if (hdr_changed) {
8200 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8201 			stream_update.hdr_static_metadata = &hdr_packet;
8202 		}
8203 
8204 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8205 		WARN_ON(!status);
8206 		WARN_ON(!status->plane_count);
8207 
8208 		/*
8209 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8210 		 * Here we create an empty update on each plane.
8211 		 * To fix this, DC should permit updating only stream properties.
8212 		 */
8213 		for (j = 0; j < status->plane_count; j++)
8214 			surface_updates[j].surface = status->plane_states[j];
8215 
8216 
8218 		dc_commit_updates_for_stream(dm->dc,
8219 					     surface_updates,
8220 						     status->plane_count,
8221 						     dm_new_crtc_state->stream,
8222 						     &stream_update);
8223 		mutex_unlock(&dm->dc_lock);
8224 	}
8225 
8226 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8227 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8228 				      new_crtc_state, i) {
8229 		if (old_crtc_state->active && !new_crtc_state->active)
8230 			crtc_disable_count++;
8231 
8232 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8233 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8234 
8235 		/* For freesync config update on crtc state and params for irq */
8236 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8237 
8238 		/* Handle vrr on->off / off->on transitions */
8239 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8240 						dm_new_crtc_state);
8241 	}
8242 
8243 	/*
8244 	 * Enable interrupts for CRTCs that are newly enabled or went through
8245 	 * a modeset. This is intentionally deferred until after the front-end
8246 	 * state has been modified, so that the OTG is on and the IRQ
8247 	 * handlers do not access stale or invalid state.
8248 	 */
8249 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8250 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8251 
8252 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8253 
8254 		if (new_crtc_state->active &&
8255 		    (!old_crtc_state->active ||
8256 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8257 			dc_stream_retain(dm_new_crtc_state->stream);
8258 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8259 			manage_dm_interrupts(adev, acrtc, true);
8260 
8261 #ifdef CONFIG_DEBUG_FS
8262 			/*
8263 			 * The front end may have changed, so reapply the CRC
8264 			 * capture settings for the stream.
8265 			 */
8266 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8267 
8268 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8269 				amdgpu_dm_crtc_configure_crc_source(
8270 					crtc, dm_new_crtc_state,
8271 					dm_new_crtc_state->crc_src);
8272 			}
8273 #endif
8274 		}
8275 	}
8276 
8277 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8278 		if (new_crtc_state->async_flip)
8279 			wait_for_vblank = false;
8280 
8281 	/* update planes when needed per crtc*/
8282 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8283 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8284 
8285 		if (dm_new_crtc_state->stream)
8286 			amdgpu_dm_commit_planes(state, dc_state, dev,
8287 						dm, crtc, wait_for_vblank);
8288 	}
8289 
8290 	/* Update audio instances for each connector. */
8291 	amdgpu_dm_commit_audio(dev, state);
8292 
8293 	/*
8294 	 * Send a vblank event for each event not handled during the flip, and
8295 	 * mark the event as consumed for drm_atomic_helper_commit_hw_done().
8296 	 */
8297 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8298 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8299 
8300 		if (new_crtc_state->event)
8301 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8302 
8303 		new_crtc_state->event = NULL;
8304 	}
8305 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8306 
8307 	/* Signal HW programming completion */
8308 	drm_atomic_helper_commit_hw_done(state);
8309 
8310 	if (wait_for_vblank)
8311 		drm_atomic_helper_wait_for_flip_done(dev, state);
8312 
8313 	drm_atomic_helper_cleanup_planes(dev, state);
8314 
8315 	/* Return the stolen VGA memory back to VRAM. */
8316 	if (!adev->mman.keep_stolen_vga_memory)
8317 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8318 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8319 
8320 	/*
8321 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8322 	 * so we can put the GPU into runtime suspend if we're not driving any
8323 	 * displays anymore
8324 	 */
8325 	for (i = 0; i < crtc_disable_count; i++)
8326 		pm_runtime_put_autosuspend(dev->dev);
8327 	pm_runtime_mark_last_busy(dev->dev);
8328 
8329 	if (dc_state_temp)
8330 		dc_release_state(dc_state_temp);
8331 }
8332 
8333 
8334 static int dm_force_atomic_commit(struct drm_connector *connector)
8335 {
8336 	int ret = 0;
8337 	struct drm_device *ddev = connector->dev;
8338 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8339 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8340 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8341 	struct drm_connector_state *conn_state;
8342 	struct drm_crtc_state *crtc_state;
8343 	struct drm_plane_state *plane_state;
8344 
8345 	if (!state)
8346 		return -ENOMEM;
8347 
8348 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8349 
8350 	/* Construct an atomic state to restore previous display setting */
8351 	/* Construct an atomic state to restore the previous display settings. */
8352 	/*
8353 	 * Attach connectors to drm_atomic_state
8354 	 */
8355 	conn_state = drm_atomic_get_connector_state(state, connector);
8356 
8357 	ret = PTR_ERR_OR_ZERO(conn_state);
8358 	if (ret)
8359 		goto out;
8360 
8361 	/* Attach crtc to drm_atomic_state*/
8362 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8363 
8364 	ret = PTR_ERR_OR_ZERO(crtc_state);
8365 	if (ret)
8366 		goto out;
8367 
8368 	/* force a restore */
8369 	crtc_state->mode_changed = true;
8370 
8371 	/* Attach plane to drm_atomic_state */
8372 	plane_state = drm_atomic_get_plane_state(state, plane);
8373 
8374 	ret = PTR_ERR_OR_ZERO(plane_state);
8375 	if (ret)
8376 		goto out;
8377 
8378 	/* Call commit internally with the state we just constructed */
8379 	ret = drm_atomic_commit(state);
8380 
8381 out:
8382 	drm_atomic_state_put(state);
8383 	if (ret)
8384 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8385 
8386 	return ret;
8387 }
8388 
8389 /*
8390  * This function handles all cases when a set mode does not come upon hotplug.
8391  * This includes when a display is unplugged and then plugged back into the
8392  * same port, and when running without usermode desktop manager support.
8393  */
8394 void dm_restore_drm_connector_state(struct drm_device *dev,
8395 				    struct drm_connector *connector)
8396 {
8397 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8398 	struct amdgpu_crtc *disconnected_acrtc;
8399 	struct dm_crtc_state *acrtc_state;
8400 
8401 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8402 		return;
8403 
8404 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8405 	if (!disconnected_acrtc)
8406 		return;
8407 
8408 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8409 	if (!acrtc_state->stream)
8410 		return;
8411 
8412 	/*
8413 	 * If the previous sink has not been released and is different from the
8414 	 * current one, we deduce that we cannot rely on a usermode call to turn
8415 	 * the display on, so we do it here.
8416 	 */
8417 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8418 		dm_force_atomic_commit(&aconnector->base);
8419 }
8420 
8421 /*
8422  * Grabs all modesetting locks to serialize against any blocking commits,
8423  * and waits for completion of all non-blocking commits.
8424  */
8425 static int do_acquire_global_lock(struct drm_device *dev,
8426 				 struct drm_atomic_state *state)
8427 {
8428 	struct drm_crtc *crtc;
8429 	struct drm_crtc_commit *commit;
8430 	long ret;
8431 
8432 	/*
8433 	 * Adding all modeset locks to acquire_ctx will
8434 	 * ensure that when the framework releases it, the
8435 	 * extra locks we are taking here will get released too.
8436 	 */
8437 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8438 	if (ret)
8439 		return ret;
8440 
8441 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8442 		spin_lock(&crtc->commit_lock);
8443 		commit = list_first_entry_or_null(&crtc->commit_list,
8444 				struct drm_crtc_commit, commit_entry);
8445 		if (commit)
8446 			drm_crtc_commit_get(commit);
8447 		spin_unlock(&crtc->commit_lock);
8448 
8449 		if (!commit)
8450 			continue;
8451 
8452 		/*
8453 		 * Make sure all pending HW programming has completed and all
8454 		 * page flips are done
8455 		 */
8456 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8457 
8458 		if (ret > 0)
8459 			ret = wait_for_completion_interruptible_timeout(
8460 					&commit->flip_done, 10*HZ);
8461 
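		/*
		 * Note: wait_for_completion_interruptible_timeout() returns the
		 * remaining jiffies on success, 0 on timeout and -ERESTARTSYS if
		 * interrupted, so ret > 0 above means hw_done signaled and
		 * ret == 0 below means one of the waits timed out.
		 */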
8462 		if (ret == 0)
8463 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
8464 				  crtc->base.id, crtc->name);
8465 
8466 		drm_crtc_commit_put(commit);
8467 	}
8468 
8469 	return ret < 0 ? ret : 0;
8470 }
8471 
8472 static void get_freesync_config_for_crtc(
8473 	struct dm_crtc_state *new_crtc_state,
8474 	struct dm_connector_state *new_con_state)
8475 {
8476 	struct mod_freesync_config config = {0};
8477 	struct amdgpu_dm_connector *aconnector =
8478 			to_amdgpu_dm_connector(new_con_state->base.connector);
8479 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8480 	int vrefresh = drm_mode_vrefresh(mode);
8481 
8482 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8483 					vrefresh >= aconnector->min_vfreq &&
8484 					vrefresh <= aconnector->max_vfreq;
8485 
8486 	if (new_crtc_state->vrr_supported) {
8487 		new_crtc_state->stream->ignore_msa_timing_param = true;
8488 		config.state = new_crtc_state->base.vrr_enabled ?
8489 				VRR_STATE_ACTIVE_VARIABLE :
8490 				VRR_STATE_INACTIVE;
8491 		config.min_refresh_in_uhz =
8492 				aconnector->min_vfreq * 1000000;
8493 		config.max_refresh_in_uhz =
8494 				aconnector->max_vfreq * 1000000;
8495 		config.vsif_supported = true;
8496 		config.btr = true;
8497 	}
8498 
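	/*
	 * Worked example (hypothetical panel, for illustration only): a sink
	 * advertising a 48-144 Hz FreeSync range yields min_refresh_in_uhz =
	 * 48,000,000 and max_refresh_in_uhz = 144,000,000, and a 60 Hz mode
	 * satisfies 48 <= 60 <= 144, so vrr_supported is set above.
	 */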
8499 	new_crtc_state->freesync_config = config;
8500 }
8501 
8502 static void reset_freesync_config_for_crtc(
8503 	struct dm_crtc_state *new_crtc_state)
8504 {
8505 	new_crtc_state->vrr_supported = false;
8506 
8507 	memset(&new_crtc_state->vrr_infopacket, 0,
8508 	       sizeof(new_crtc_state->vrr_infopacket));
8509 }
8510 
8511 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8512 				struct drm_atomic_state *state,
8513 				struct drm_crtc *crtc,
8514 				struct drm_crtc_state *old_crtc_state,
8515 				struct drm_crtc_state *new_crtc_state,
8516 				bool enable,
8517 				bool *lock_and_validation_needed)
8518 {
8519 	struct dm_atomic_state *dm_state = NULL;
8520 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8521 	struct dc_stream_state *new_stream;
8522 	int ret = 0;
8523 
8524 	/*
8525 	 * TODO: Move this code, which updates the changed items, into
8526 	 * dm_crtc_atomic_check once we get rid of dc_validation_set.
8527 	 */
8528 	struct amdgpu_crtc *acrtc = NULL;
8529 	struct amdgpu_dm_connector *aconnector = NULL;
8530 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8531 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8532 
8533 	new_stream = NULL;
8534 
8535 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8536 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8537 	acrtc = to_amdgpu_crtc(crtc);
8538 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8539 
8540 	/* TODO This hack should go away */
8541 	if (aconnector && enable) {
8542 		/* Make sure fake sink is created in plug-in scenario */
8543 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8544 							    &aconnector->base);
8545 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8546 							    &aconnector->base);
8547 
8548 		if (IS_ERR(drm_new_conn_state)) {
8549 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8550 			goto fail;
8551 		}
8552 
8553 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8554 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8555 
8556 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8557 			goto skip_modeset;
8558 
8559 		new_stream = create_validate_stream_for_sink(aconnector,
8560 							     &new_crtc_state->mode,
8561 							     dm_new_conn_state,
8562 							     dm_old_crtc_state->stream);
8563 
8564 		/*
8565 		 * We can have no stream on ACTION_SET if a display
8566 		 * was disconnected during S3. In this case it is not an
8567 		 * error: the OS will be updated after detection and
8568 		 * will do the right thing on the next atomic commit.
8569 		 */
8570 
8571 		if (!new_stream) {
8572 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8573 					__func__, acrtc->base.base.id);
8574 			ret = -ENOMEM;
8575 			goto fail;
8576 		}
8577 
8578 		/*
8579 		 * TODO: Check VSDB bits to decide whether this should
8580 		 * be enabled or not.
8581 		 */
8582 		new_stream->triggered_crtc_reset.enabled =
8583 			dm->force_timing_sync;
8584 
8585 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8586 
8587 		ret = fill_hdr_info_packet(drm_new_conn_state,
8588 					   &new_stream->hdr_static_metadata);
8589 		if (ret)
8590 			goto fail;
8591 
8592 		/*
8593 		 * If we already removed the old stream from the context
8594 		 * (and set the new stream to NULL) then we can't reuse
8595 		 * the old stream even if the stream and scaling are unchanged.
8596 		 * We'll hit the BUG_ON and black screen.
8597 		 *
8598 		 * TODO: Refactor this function to allow this check to work
8599 		 * in all conditions.
8600 		 */
8601 		if (dm_new_crtc_state->stream &&
8602 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8603 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8604 			new_crtc_state->mode_changed = false;
8605 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
8606 					 new_crtc_state->mode_changed);
8607 		}
8608 	}
8609 
8610 	/* mode_changed flag may get updated above, need to check again */
8611 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8612 		goto skip_modeset;
8613 
8614 	DRM_DEBUG_DRIVER(
8615 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8616 		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
8617 		"connectors_changed:%d\n",
8618 		acrtc->crtc_id,
8619 		new_crtc_state->enable,
8620 		new_crtc_state->active,
8621 		new_crtc_state->planes_changed,
8622 		new_crtc_state->mode_changed,
8623 		new_crtc_state->active_changed,
8624 		new_crtc_state->connectors_changed);
8625 
8626 	/* Remove stream for any changed/disabled CRTC */
8627 	if (!enable) {
8628 
8629 		if (!dm_old_crtc_state->stream)
8630 			goto skip_modeset;
8631 
8632 		ret = dm_atomic_get_state(state, &dm_state);
8633 		if (ret)
8634 			goto fail;
8635 
8636 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8637 				crtc->base.id);
8638 
8639 		/* i.e. reset mode */
8640 		if (dc_remove_stream_from_ctx(
8641 				dm->dc,
8642 				dm_state->context,
8643 				dm_old_crtc_state->stream) != DC_OK) {
8644 			ret = -EINVAL;
8645 			goto fail;
8646 		}
8647 
8648 		dc_stream_release(dm_old_crtc_state->stream);
8649 		dm_new_crtc_state->stream = NULL;
8650 
8651 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8652 
8653 		*lock_and_validation_needed = true;
8654 
8655 	} else { /* Add stream for any updated/enabled CRTC */
8656 		/*
8657 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
8658 		 * newly added MST connectors are not found in the existing crtc_state
8659 		 * in chained mode. TODO: need to dig out the root cause of that.
8660 		 */
8661 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8662 			goto skip_modeset;
8663 
8664 		if (modereset_required(new_crtc_state))
8665 			goto skip_modeset;
8666 
8667 		if (modeset_required(new_crtc_state, new_stream,
8668 				     dm_old_crtc_state->stream)) {
8669 
8670 			WARN_ON(dm_new_crtc_state->stream);
8671 
8672 			ret = dm_atomic_get_state(state, &dm_state);
8673 			if (ret)
8674 				goto fail;
8675 
8676 			dm_new_crtc_state->stream = new_stream;
8677 
8678 			dc_stream_retain(new_stream);
8679 
8680 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8681 						crtc->base.id);
8682 
8683 			if (dc_add_stream_to_ctx(
8684 					dm->dc,
8685 					dm_state->context,
8686 					dm_new_crtc_state->stream) != DC_OK) {
8687 				ret = -EINVAL;
8688 				goto fail;
8689 			}
8690 
8691 			*lock_and_validation_needed = true;
8692 		}
8693 	}
8694 
8695 skip_modeset:
8696 	/* Release extra reference */
8697 	if (new_stream)
8698 		dc_stream_release(new_stream);
8699 
8700 	/*
8701 	 * We want to do dc stream updates that do not require a
8702 	 * full modeset below.
8703 	 */
8704 	if (!(enable && aconnector && new_crtc_state->active))
8705 		return 0;
8706 	/*
8707 	 * Given the above conditions, the dc state cannot be NULL because:
8708 	 * 1. We're in the process of enabling the CRTC (its stream has either
8709 	 *    just been added to the dc context, or is already on it),
8710 	 * 2. Has a valid connector attached, and
8711 	 * 3. Is currently active and enabled.
8712 	 * => The dc stream state currently exists.
8713 	 */
8714 	BUG_ON(dm_new_crtc_state->stream == NULL);
8715 
8716 	/* Scaling or underscan settings */
8717 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8718 		update_stream_scaling_settings(
8719 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8720 
8721 	/* ABM settings */
8722 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8723 
8724 	/*
8725 	 * Color management settings. We also update color properties
8726 	 * when a modeset is needed, to ensure it gets reprogrammed.
8727 	 */
8728 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8729 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8730 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8731 		if (ret)
8732 			goto fail;
8733 	}
8734 
8735 	/* Update Freesync settings. */
8736 	get_freesync_config_for_crtc(dm_new_crtc_state,
8737 				     dm_new_conn_state);
8738 
8739 	return ret;
8740 
8741 fail:
8742 	if (new_stream)
8743 		dc_stream_release(new_stream);
8744 	return ret;
8745 }
8746 
8747 static bool should_reset_plane(struct drm_atomic_state *state,
8748 			       struct drm_plane *plane,
8749 			       struct drm_plane_state *old_plane_state,
8750 			       struct drm_plane_state *new_plane_state)
8751 {
8752 	struct drm_plane *other;
8753 	struct drm_plane_state *old_other_state, *new_other_state;
8754 	struct drm_crtc_state *new_crtc_state;
8755 	int i;
8756 
8757 	/*
8758 	 * TODO: Remove this hack once the checks below are sufficient
8759 	 * to determine when we need to reset all the planes on
8760 	 * the stream.
8761 	 */
8762 	if (state->allow_modeset)
8763 		return true;
8764 
8765 	/* Exit early if we know that we're adding or removing the plane. */
8766 	if (old_plane_state->crtc != new_plane_state->crtc)
8767 		return true;
8768 
8769 	/* old crtc == new_crtc == NULL, plane not in context. */
8770 	if (!new_plane_state->crtc)
8771 		return false;
8772 
8773 	new_crtc_state =
8774 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8775 
8776 	if (!new_crtc_state)
8777 		return true;
8778 
8779 	/* CRTC Degamma changes currently require us to recreate planes. */
8780 	if (new_crtc_state->color_mgmt_changed)
8781 		return true;
8782 
8783 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8784 		return true;
8785 
8786 	/*
8787 	 * If there are any new primary or overlay planes being added or
8788 	 * removed then the z-order can potentially change. To ensure
8789 	 * correct z-order and pipe acquisition the current DC architecture
8790 	 * requires us to remove and recreate all existing planes.
8791 	 *
8792 	 * TODO: Come up with a more elegant solution for this.
8793 	 */
8794 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8795 		struct amdgpu_framebuffer *old_afb, *new_afb;

8796 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8797 			continue;
8798 
8799 		if (old_other_state->crtc != new_plane_state->crtc &&
8800 		    new_other_state->crtc != new_plane_state->crtc)
8801 			continue;
8802 
8803 		if (old_other_state->crtc != new_other_state->crtc)
8804 			return true;
8805 
8806 		/* Src/dst size and scaling updates. */
8807 		if (old_other_state->src_w != new_other_state->src_w ||
8808 		    old_other_state->src_h != new_other_state->src_h ||
8809 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8810 		    old_other_state->crtc_h != new_other_state->crtc_h)
8811 			return true;
8812 
8813 		/* Rotation / mirroring updates. */
8814 		if (old_other_state->rotation != new_other_state->rotation)
8815 			return true;
8816 
8817 		/* Blending updates. */
8818 		if (old_other_state->pixel_blend_mode !=
8819 		    new_other_state->pixel_blend_mode)
8820 			return true;
8821 
8822 		/* Alpha updates. */
8823 		if (old_other_state->alpha != new_other_state->alpha)
8824 			return true;
8825 
8826 		/* Colorspace changes. */
8827 		if (old_other_state->color_range != new_other_state->color_range ||
8828 		    old_other_state->color_encoding != new_other_state->color_encoding)
8829 			return true;
8830 
8831 		/* Framebuffer checks fall at the end. */
8832 		if (!old_other_state->fb || !new_other_state->fb)
8833 			continue;
8834 
8835 		/* Pixel format changes can require bandwidth updates. */
8836 		if (old_other_state->fb->format != new_other_state->fb->format)
8837 			return true;
8838 
8839 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8840 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8841 
8842 		/* Tiling and DCC changes also require bandwidth updates. */
8843 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
8844 		    old_afb->base.modifier != new_afb->base.modifier)
8845 			return true;
8846 	}
8847 
8848 	return false;
8849 }
8850 
8851 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8852 			      struct drm_plane_state *new_plane_state,
8853 			      struct drm_framebuffer *fb)
8854 {
8855 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8856 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8857 	unsigned int pitch;
8858 	bool linear;
8859 
8860 	if (fb->width > new_acrtc->max_cursor_width ||
8861 	    fb->height > new_acrtc->max_cursor_height) {
8862 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8863 				 fb->width,
8864 				 fb->height);
8865 		return -EINVAL;
8866 	}
8867 	if (new_plane_state->src_w != fb->width << 16 ||
8868 	    new_plane_state->src_h != fb->height << 16) {
8869 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8870 		return -EINVAL;
8871 	}
8872 
8873 	/* Pitch in pixels */
8874 	pitch = fb->pitches[0] / fb->format->cpp[0];
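	/*
	 * Example (illustrative values only): an ARGB8888 FB with
	 * pitches[0] = 1024 bytes and cpp[0] = 4 bytes per pixel yields a
	 * pitch of 256 pixels, which must match fb->width and be one of
	 * the pitches accepted below.
	 */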
8875 
8876 	if (fb->width != pitch) {
8877 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
8878 				 fb->width, pitch);
8879 		return -EINVAL;
8880 	}
8881 
8882 	switch (pitch) {
8883 	case 64:
8884 	case 128:
8885 	case 256:
8886 		/* FB pitch is supported by cursor plane */
8887 		break;
8888 	default:
8889 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
8890 		return -EINVAL;
8891 	}
8892 
8893 	/* Core DRM takes care of checking FB modifiers, so we only need to
8894 	 * check tiling flags when the FB doesn't have a modifier. */
8895 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
8896 		if (adev->family < AMDGPU_FAMILY_AI) {
8897 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
8898 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
8899 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
8900 		} else {
8901 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
8902 		}
8903 		if (!linear) {
8904 			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
8905 			return -EINVAL;
8906 		}
8907 	}
8908 
8909 	return 0;
8910 }
8911 
8912 static int dm_update_plane_state(struct dc *dc,
8913 				 struct drm_atomic_state *state,
8914 				 struct drm_plane *plane,
8915 				 struct drm_plane_state *old_plane_state,
8916 				 struct drm_plane_state *new_plane_state,
8917 				 bool enable,
8918 				 bool *lock_and_validation_needed)
8919 {
8921 	struct dm_atomic_state *dm_state = NULL;
8922 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8923 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8924 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8925 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8926 	struct amdgpu_crtc *new_acrtc;
8927 	bool needs_reset;
8928 	int ret = 0;
8929 
8931 	new_plane_crtc = new_plane_state->crtc;
8932 	old_plane_crtc = old_plane_state->crtc;
8933 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8934 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8935 
8936 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8937 		if (!enable || !new_plane_crtc ||
8938 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8939 			return 0;
8940 
8941 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8942 
8943 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
8944 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8945 			return -EINVAL;
8946 		}
8947 
8948 		if (new_plane_state->fb) {
8949 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
8950 						 new_plane_state->fb);
8951 			if (ret)
8952 				return ret;
8953 		}
8954 
8955 		return 0;
8956 	}
8957 
8958 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8959 					 new_plane_state);
8960 
8961 	/* Remove any changed/removed planes */
8962 	if (!enable) {
8963 		if (!needs_reset)
8964 			return 0;
8965 
8966 		if (!old_plane_crtc)
8967 			return 0;
8968 
8969 		old_crtc_state = drm_atomic_get_old_crtc_state(
8970 				state, old_plane_crtc);
8971 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8972 
8973 		if (!dm_old_crtc_state->stream)
8974 			return 0;
8975 
8976 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8977 				plane->base.id, old_plane_crtc->base.id);
8978 
8979 		ret = dm_atomic_get_state(state, &dm_state);
8980 		if (ret)
8981 			return ret;
8982 
8983 		if (!dc_remove_plane_from_context(
8984 				dc,
8985 				dm_old_crtc_state->stream,
8986 				dm_old_plane_state->dc_state,
8987 				dm_state->context)) {
8988 
8989 			return -EINVAL;
8990 		}
8991 
8993 		dc_plane_state_release(dm_old_plane_state->dc_state);
8994 		dm_new_plane_state->dc_state = NULL;
8995 
8996 		*lock_and_validation_needed = true;
8997 
8998 	} else { /* Add new planes */
8999 		struct dc_plane_state *dc_new_plane_state;
9000 
9001 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9002 			return 0;
9003 
9004 		if (!new_plane_crtc)
9005 			return 0;
9006 
9007 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9008 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9009 
9010 		if (!dm_new_crtc_state->stream)
9011 			return 0;
9012 
9013 		if (!needs_reset)
9014 			return 0;
9015 
9016 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9017 		if (ret)
9018 			return ret;
9019 
9020 		WARN_ON(dm_new_plane_state->dc_state);
9021 
9022 		dc_new_plane_state = dc_create_plane_state(dc);
9023 		if (!dc_new_plane_state)
9024 			return -ENOMEM;
9025 
9026 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9027 				plane->base.id, new_plane_crtc->base.id);
9028 
9029 		ret = fill_dc_plane_attributes(
9030 			drm_to_adev(new_plane_crtc->dev),
9031 			dc_new_plane_state,
9032 			new_plane_state,
9033 			new_crtc_state);
9034 		if (ret) {
9035 			dc_plane_state_release(dc_new_plane_state);
9036 			return ret;
9037 		}
9038 
9039 		ret = dm_atomic_get_state(state, &dm_state);
9040 		if (ret) {
9041 			dc_plane_state_release(dc_new_plane_state);
9042 			return ret;
9043 		}
9044 
9045 		/*
9046 		 * Any atomic check errors that occur after this will
9047 		 * not need a release. The plane state will be attached
9048 		 * to the stream, and therefore part of the atomic
9049 		 * state. It'll be released when the atomic state is
9050 		 * cleaned.
9051 		 */
9052 		if (!dc_add_plane_to_context(
9053 				dc,
9054 				dm_new_crtc_state->stream,
9055 				dc_new_plane_state,
9056 				dm_state->context)) {
9057 
9058 			dc_plane_state_release(dc_new_plane_state);
9059 			return -EINVAL;
9060 		}
9061 
9062 		dm_new_plane_state->dc_state = dc_new_plane_state;
9063 
9064 		/* Tell DC to do a full surface update every time there
9065 		 * is a plane change. Inefficient, but works for now.
9066 		 */
9067 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9068 
9069 		*lock_and_validation_needed = true;
9070 	}
9071 
9073 	return ret;
9074 }
9075 
9076 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9077 				struct drm_crtc *crtc,
9078 				struct drm_crtc_state *new_crtc_state)
9079 {
9080 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9081 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9082 
9083 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9084 	 * cursor per pipe but it's going to inherit the scaling and
9085 	 * positioning from the underlying pipe. Check the cursor plane's
9086 	 * positioning from the underlying pipe. Check that the cursor plane's
9087 	 * scaling matches the primary plane's. */
9088 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9089 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9090 	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb)
9091 		return 0;
9093 
9094 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9095 			 (new_cursor_state->src_w >> 16);
9096 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9097 			 (new_cursor_state->src_h >> 16);
9098 
9099 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9100 			 (new_primary_state->src_w >> 16);
9101 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9102 			 (new_primary_state->src_h >> 16);
9103 
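	/*
	 * Example (illustrative): a 64x64 cursor (src_w = 64 << 16) shown at
	 * crtc_w = 64 scales to 64 * 1000 / 64 = 1000 (1.0x). If the primary
	 * plane scans a 1920-wide surface out to a 3840-wide mode, its scale
	 * is 2000 (2.0x) and the check below rejects the configuration.
	 */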
9104 	if (cursor_scale_w != primary_scale_w ||
9105 	    cursor_scale_h != primary_scale_h) {
9106 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9107 		return -EINVAL;
9108 	}
9109 
9110 	return 0;
9111 }
9112 
9113 #if defined(CONFIG_DRM_AMD_DC_DCN)
9114 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9115 {
9116 	struct drm_connector *connector;
9117 	struct drm_connector_state *conn_state;
9118 	struct amdgpu_dm_connector *aconnector = NULL;
9119 	int i;
9120 	for_each_new_connector_in_state(state, connector, conn_state, i) {
9121 		if (conn_state->crtc != crtc)
9122 			continue;
9123 
9124 		aconnector = to_amdgpu_dm_connector(connector);
9125 		if (!aconnector->port || !aconnector->mst_port)
9126 			aconnector = NULL;
9127 		else
9128 			break;
9129 	}
9130 
9131 	if (!aconnector)
9132 		return 0;
9133 
9134 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9135 }
9136 #endif
9137 
9138 /**
9139  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9140  * @dev: The DRM device
9141  * @state: The atomic state to commit
9142  *
9143  * Validate that the given atomic state is programmable by DC into hardware.
9144  * This involves constructing a &struct dc_state reflecting the new hardware
9145  * state we wish to commit, then querying DC to see if it is programmable. It's
9146  * important not to modify the existing DC state. Otherwise, atomic_check
9147  * may unexpectedly commit hardware changes.
9148  *
9149  * When validating the DC state, it's important that the right locks are
9150  * acquired. For full updates case which removes/adds/updates streams on one
9151  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9152  * that any such full update commit will wait for completion of any outstanding
9153  * flip using DRMs synchronization events.
9154  *
9155  * Note that DM adds the affected connectors for all CRTCs in state, even when
9156  * might not seem necessary. This is because DC stream creation requires the
9157  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9158  * be possible but non-trivial - a possible TODO item.
9159  *
9160  * Return: 0 on success, or a negative error code if validation failed.
9161  */
9162 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9163 				  struct drm_atomic_state *state)
9164 {
9165 	struct amdgpu_device *adev = drm_to_adev(dev);
9166 	struct dm_atomic_state *dm_state = NULL;
9167 	struct dc *dc = adev->dm.dc;
9168 	struct drm_connector *connector;
9169 	struct drm_connector_state *old_con_state, *new_con_state;
9170 	struct drm_crtc *crtc;
9171 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9172 	struct drm_plane *plane;
9173 	struct drm_plane_state *old_plane_state, *new_plane_state;
9174 	enum dc_status status;
9175 	int ret, i;
9176 	bool lock_and_validation_needed = false;
9177 	struct dm_crtc_state *dm_old_crtc_state;
9178 
9179 	trace_amdgpu_dm_atomic_check_begin(state);
9180 
9181 	ret = drm_atomic_helper_check_modeset(dev, state);
9182 	if (ret)
9183 		goto fail;
9184 
9185 	/* Check connector changes */
9186 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9187 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9188 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9189 
9190 		/* Skip connectors that are disabled or part of modeset already. */
9191 		if (!old_con_state->crtc && !new_con_state->crtc)
9192 			continue;
9193 
9194 		if (!new_con_state->crtc)
9195 			continue;
9196 
9197 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9198 		if (IS_ERR(new_crtc_state)) {
9199 			ret = PTR_ERR(new_crtc_state);
9200 			goto fail;
9201 		}
9202 
9203 		if (dm_old_con_state->abm_level !=
9204 		    dm_new_con_state->abm_level)
9205 			new_crtc_state->connectors_changed = true;
9206 	}
9207 
9208 #if defined(CONFIG_DRM_AMD_DC_DCN)
9209 	if (adev->asic_type >= CHIP_NAVI10) {
9210 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9211 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9212 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9213 				if (ret)
9214 					goto fail;
9215 			}
9216 		}
9217 	}
9218 #endif
9219 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9220 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9221 
9222 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9223 		    !new_crtc_state->color_mgmt_changed &&
9224 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9225 		    !dm_old_crtc_state->dsc_force_changed)
9226 			continue;
9227 
9228 		if (!new_crtc_state->enable)
9229 			continue;
9230 
9231 		ret = drm_atomic_add_affected_connectors(state, crtc);
9232 		if (ret)
9233 			goto fail;
9234 
9235 		ret = drm_atomic_add_affected_planes(state, crtc);
9236 		if (ret)
9237 			goto fail;
9238 
9239 		if (dm_old_crtc_state->dsc_force_changed)
9240 			new_crtc_state->mode_changed = true;
9241 	}
9242 
9243 	/*
9244 	 * Add all primary and overlay planes on the CRTC to the state
9245 	 * whenever a plane is enabled to maintain correct z-ordering
9246 	 * and to enable fast surface updates.
9247 	 */
9248 	drm_for_each_crtc(crtc, dev) {
9249 		bool modified = false;
9250 
9251 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9252 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9253 				continue;
9254 
9255 			if (new_plane_state->crtc == crtc ||
9256 			    old_plane_state->crtc == crtc) {
9257 				modified = true;
9258 				break;
9259 			}
9260 		}
9261 
9262 		if (!modified)
9263 			continue;
9264 
9265 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9266 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9267 				continue;
9268 
9269 			new_plane_state =
9270 				drm_atomic_get_plane_state(state, plane);
9271 
9272 			if (IS_ERR(new_plane_state)) {
9273 				ret = PTR_ERR(new_plane_state);
9274 				goto fail;
9275 			}
9276 		}
9277 	}
9278 
9279 	/* Remove existing planes if they are modified */
9280 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9281 		ret = dm_update_plane_state(dc, state, plane,
9282 					    old_plane_state,
9283 					    new_plane_state,
9284 					    false,
9285 					    &lock_and_validation_needed);
9286 		if (ret)
9287 			goto fail;
9288 	}
9289 
9290 	/* Disable all crtcs which require disable */
9291 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9292 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9293 					   old_crtc_state,
9294 					   new_crtc_state,
9295 					   false,
9296 					   &lock_and_validation_needed);
9297 		if (ret)
9298 			goto fail;
9299 	}
9300 
9301 	/* Enable all crtcs which require enable */
9302 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9303 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9304 					   old_crtc_state,
9305 					   new_crtc_state,
9306 					   true,
9307 					   &lock_and_validation_needed);
9308 		if (ret)
9309 			goto fail;
9310 	}
9311 
9312 	/* Add new/modified planes */
9313 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9314 		ret = dm_update_plane_state(dc, state, plane,
9315 					    old_plane_state,
9316 					    new_plane_state,
9317 					    true,
9318 					    &lock_and_validation_needed);
9319 		if (ret)
9320 			goto fail;
9321 	}
9322 
9323 	/* Run this here since we want to validate the streams we created */
9324 	ret = drm_atomic_helper_check_planes(dev, state);
9325 	if (ret)
9326 		goto fail;
9327 
9328 	/* Check cursor planes scaling */
9329 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9330 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9331 		if (ret)
9332 			goto fail;
9333 	}
9334 
9335 	if (state->legacy_cursor_update) {
9336 		/*
9337 		 * This is a fast cursor update coming from the plane update
9338 		 * helper, check if it can be done asynchronously for better
9339 		 * performance.
9340 		 */
9341 		state->async_update =
9342 			!drm_atomic_helper_async_check(dev, state);
9343 
9344 		/*
9345 		 * Skip the remaining global validation if this is an async
9346 		 * update. Cursor updates can be done without affecting
9347 		 * state or bandwidth calcs and this avoids the performance
9348 		 * penalty of locking the private state object and
9349 		 * allocating a new dc_state.
9350 		 */
9351 		if (state->async_update)
9352 			return 0;
9353 	}
9354 
9355 	/* Check scaling and underscan changes */
9356 	/* TODO: Removed scaling changes validation due to inability to commit
9357 	 * a new stream into the context w/o causing a full reset. Need to
9358 	 * decide how to handle.
9359 	 */
9360 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9361 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9362 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9363 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9364 
9365 		/* Skip any modesets/resets */
9366 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9367 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9368 			continue;
9369 
9370 		/* Skip anything that is not a scaling or underscan change */
9371 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9372 			continue;
9373 
9374 		lock_and_validation_needed = true;
9375 	}
9376 
9377 	/*
9378 	 * Streams and planes are reset when there are changes that affect
9379 	 * bandwidth. Anything that affects bandwidth needs to go through
9380 	 * DC global validation to ensure that the configuration can be applied
9381 	 * to hardware.
9382 	 *
9383 	 * We have to currently stall out here in atomic_check for outstanding
9384 	 * commits to finish in this case because our IRQ handlers reference
9385 	 * DRM state directly - we can end up disabling interrupts too early
9386 	 * if we don't.
9387 	 *
9388 	 * TODO: Remove this stall and drop DM state private objects.
9389 	 */
9390 	if (lock_and_validation_needed) {
9391 		ret = dm_atomic_get_state(state, &dm_state);
9392 		if (ret)
9393 			goto fail;
9394 
9395 		ret = do_acquire_global_lock(dev, state);
9396 		if (ret)
9397 			goto fail;
9398 
9399 #if defined(CONFIG_DRM_AMD_DC_DCN)
9400 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9401 			goto fail;
9402 
9403 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9404 		if (ret)
9405 			goto fail;
9406 #endif
9407 
9408 		/*
9409 		 * Perform validation of MST topology in the state:
9410 		 * We need to perform MST atomic check before calling
9411 		 * dc_validate_global_state(), or there is a chance
9412 		 * to get stuck in an infinite loop and hang eventually.
9413 		 */
9414 		ret = drm_dp_mst_atomic_check(state);
9415 		if (ret)
9416 			goto fail;
9417 		status = dc_validate_global_state(dc, dm_state->context, false);
9418 		if (status != DC_OK) {
9419 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
9420 				       dc_status_to_str(status), status);
9421 			ret = -EINVAL;
9422 			goto fail;
9423 		}
9424 	} else {
9425 		/*
9426 		 * The commit is a fast update. Fast updates shouldn't change
9427 		 * the DC context or affect global validation, and they can
9428 		 * have their commit work done in parallel with other commits
9429 		 * not touching the same resource. If we have a new DC context
9430 		 * as part of the DM atomic state from validation, we need to
9431 		 * free it and retain the existing one instead.
9432 		 *
9433 		 * Furthermore, since the DM atomic state only contains the DC
9434 		 * context and can safely be annulled, we can free the state
9435 		 * and clear the associated private object now to free
9436 		 * some memory and avoid a possible use-after-free later.
9437 		 */
9438 
9439 		for (i = 0; i < state->num_private_objs; i++) {
9440 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9441 
9442 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
9443 				int j = state->num_private_objs - 1;
9444 
9445 				dm_atomic_destroy_state(obj,
9446 						state->private_objs[i].state);
9447 
9448 				/* If i is not at the end of the array then the
9449 				 * last element needs to be moved to where i was
9450 				 * before the array can safely be truncated.
9451 				 */
9452 				if (i != j)
9453 					state->private_objs[i] =
9454 						state->private_objs[j];
9455 
9456 				state->private_objs[j].ptr = NULL;
9457 				state->private_objs[j].state = NULL;
9458 				state->private_objs[j].old_state = NULL;
9459 				state->private_objs[j].new_state = NULL;
9460 
9461 				state->num_private_objs = j;
9462 				break;
9463 			}
9464 		}
9465 	}
9466 
9467 	/* Store the overall update type for use later in atomic check. */
9468 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9469 		struct dm_crtc_state *dm_new_crtc_state =
9470 			to_dm_crtc_state(new_crtc_state);
9471 
9472 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9473 							 UPDATE_TYPE_FULL :
9474 							 UPDATE_TYPE_FAST;
9475 	}
9476 
9477 	/* Must have succeeded */
9478 	WARN_ON(ret);
9479 
9480 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9481 
9482 	return ret;
9483 
9484 fail:
9485 	if (ret == -EDEADLK)
9486 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9487 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9488 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9489 	else
9490 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9491 
9492 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9493 
9494 	return ret;
9495 }
9496 
9497 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9498 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9499 {
9500 	uint8_t dpcd_data;
9501 	bool capable = false;
9502 
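	/*
	 * DP_MSA_TIMING_PAR_IGNORED is bit 6 of DP_DOWN_STREAM_PORT_COUNT
	 * (DPCD address 0x007): the sink can maintain its timing while the
	 * MSA video timing parameters are ignored, a prerequisite for
	 * variable refresh. Example (illustrative): a DPCD readback of 0x41
	 * has the bit set, so the link is reported as capable.
	 */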
9503 	if (amdgpu_dm_connector->dc_link &&
9504 		dm_helpers_dp_read_dpcd(
9505 				NULL,
9506 				amdgpu_dm_connector->dc_link,
9507 				DP_DOWN_STREAM_PORT_COUNT,
9508 				&dpcd_data,
9509 				sizeof(dpcd_data))) {
9510 		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
9511 	}
9512 
9513 	return capable;
9514 }

9515 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9516 					struct edid *edid)
9517 {
9518 	int i;
9519 	bool edid_check_required;
9520 	struct detailed_timing *timing;
9521 	struct detailed_non_pixel *data;
9522 	struct detailed_data_monitor_range *range;
9523 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9524 			to_amdgpu_dm_connector(connector);
9525 	struct dm_connector_state *dm_con_state = NULL;
9526 
9527 	struct drm_device *dev = connector->dev;
9528 	struct amdgpu_device *adev = drm_to_adev(dev);
9529 	bool freesync_capable = false;
9530 
9531 	if (!connector->state) {
9532 		DRM_ERROR("%s - Connector has no state\n", __func__);
9533 		goto update;
9534 	}
9535 
9536 	if (!edid) {
9537 		dm_con_state = to_dm_connector_state(connector->state);
9538 
9539 		amdgpu_dm_connector->min_vfreq = 0;
9540 		amdgpu_dm_connector->max_vfreq = 0;
9541 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9542 
9543 		goto update;
9544 	}
9545 
9546 	dm_con_state = to_dm_connector_state(connector->state);
9547 
9548 	edid_check_required = false;
9549 	if (!amdgpu_dm_connector->dc_sink) {
9550 		DRM_ERROR("dc_sink NULL, could not add freesync module.\n");
9551 		goto update;
9552 	}
9553 	if (!adev->dm.freesync_module)
9554 		goto update;
9555 	/*
9556 	 * If the EDID is non-NULL, restrict FreeSync only to DP and eDP.
9557 	 */
9558 	if (edid) {
9559 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9560 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9561 			edid_check_required = is_dp_capable_without_timing_msa(
9562 						adev->dm.dc,
9563 						amdgpu_dm_connector);
9564 		}
9565 	}
9566 	if (edid_check_required && (edid->version > 1 ||
9567 	   (edid->version == 1 && edid->revision > 1))) {
9568 		for (i = 0; i < 4; i++) {
9569 
9570 			timing	= &edid->detailed_timings[i];
9571 			data	= &timing->data.other_data;
9572 			range	= &data->data.range;
9573 			/*
9574 			 * Check if monitor has continuous frequency mode
9575 			 */
9576 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9577 				continue;
9578 			/*
9579 			 * Check for flag range limits only. If flag == 1 then
9580 			 * no additional timing information provided.
9581 			 * Default GTF, GTF Secondary curve and CVT are not
9582 			 * supported
9583 			 */
9584 			if (range->flags != 1)
9585 				continue;
9586 
9587 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9588 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9589 			amdgpu_dm_connector->pixel_clock_mhz =
9590 				range->pixel_clock_mhz * 10;
9591 			break;
9592 		}
9593 
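		/*
		 * Example (illustrative): a range descriptor advertising
		 * 40-75 Hz stores min_vfreq = 40 and max_vfreq = 75; the
		 * 35 Hz span exceeds the 10 Hz threshold below, so the
		 * panel is treated as freesync_capable.
		 */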
9594 		if (amdgpu_dm_connector->max_vfreq -
9595 		    amdgpu_dm_connector->min_vfreq > 10)
9596 			freesync_capable = true;
9599 	}
9600 
9601 update:
9602 	if (dm_con_state)
9603 		dm_con_state->freesync_capable = freesync_capable;
9604 
9605 	if (connector->vrr_capable_property)
9606 		drm_connector_set_vrr_capable_property(connector,
9607 						       freesync_capable);
9608 }
9609 
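/*
 * amdgpu_dm_set_psr_caps() - cache eDP PSR capability from DPCD
 * @link: link to probe
 *
 * Reads DP_PSR_SUPPORT from the sink and records the result in
 * link->psr_settings, enabling the PSR feature when the sink reports
 * any PSR version.
 */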
9610 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9611 {
9612 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9613 
9614 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9615 		return;
9616 	if (link->type == dc_connection_none)
9617 		return;
9618 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9619 					dpcd_data, sizeof(dpcd_data))) {
9620 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9621 
9622 		if (dpcd_data[0] == 0) {
9623 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9624 			link->psr_settings.psr_feature_enabled = false;
9625 		} else {
9626 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9627 			link->psr_settings.psr_feature_enabled = true;
9628 		}
9629 
9630 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9631 	}
9632 }
9633 
9634 /*
9635  * amdgpu_dm_link_setup_psr() - configure psr link
9636  * @stream: stream state
9637  *
9638  * Return: true if success
9639  */
9640 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9641 {
9642 	struct dc_link *link = NULL;
9643 	struct psr_config psr_config = {0};
9644 	struct psr_context psr_context = {0};
9645 	bool ret = false;
9646 
9647 	if (stream == NULL)
9648 		return false;
9649 
9650 	link = stream->link;
9651 
9652 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9653 
9654 	if (psr_config.psr_version > 0) {
9655 		psr_config.psr_exit_link_training_required = 0x1;
9656 		psr_config.psr_frame_capture_indication_req = 0;
9657 		psr_config.psr_rfb_setup_time = 0x37;
9658 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9659 		psr_config.allow_smu_optimizations = 0x0;
9660 
9661 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9662 
9663 	}
9664 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9665 
9666 	return ret;
9667 }
9668 
9669 /*
9670  * amdgpu_dm_psr_enable() - enable psr f/w
9671  * @stream: stream state
9672  *
9673  * Return: true if success
9674  */
9675 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9676 {
9677 	struct dc_link *link = stream->link;
9678 	unsigned int vsync_rate_hz = 0;
9679 	struct dc_static_screen_params params = {0};
9680 	/*
9681 	 * Calculate the number of static frames before generating an
9682 	 * interrupt to enter PSR. Initialize with a fail-safe of 2 frames.
9683 	 */
9684 	unsigned int num_frames_static = 2;
9685 
9686 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9687 
9688 	vsync_rate_hz = div64_u64(div64_u64((
9689 			stream->timing.pix_clk_100hz * 100),
9690 			stream->timing.v_total),
9691 			stream->timing.h_total);
9692 
9693 	/*
9694 	 * Round up: calculate the number of frames such that at least
9695 	 * 30 ms of time has passed.
9696 	 */
9697 	if (vsync_rate_hz != 0) {
9698 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9699 		num_frames_static = (30000 / frame_time_microsec) + 1;
9700 	}
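
	/*
	 * Example (illustrative): at 60 Hz, frame_time_microsec = 16666, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 frames; at 144 Hz it is
	 * 30000 / 6944 + 1 = 5 frames.
	 */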
9701 
9702 	params.triggers.cursor_update = true;
9703 	params.triggers.overlay_update = true;
9704 	params.triggers.surface_update = true;
9705 	params.num_frames = num_frames_static;
9706 
9707 	dc_stream_set_static_screen_params(link->ctx->dc,
9708 					   &stream, 1,
9709 					   &params);
9710 
9711 	return dc_link_set_psr_allow_active(link, true, false, false);
9712 }
9713 
9714 /*
9715  * amdgpu_dm_psr_disable() - disable psr f/w
9716  * @stream:  stream state
9717  *
9718  * Return: true if success
9719  */
9720 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9721 {
9723 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9724 
9725 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
9726 }
9727 
9728 /*
9729  * amdgpu_dm_psr_disable_all() - disable psr f/w on all streams
9730  * where psr is enabled
9731  *
9732  * Return: true if success
9733  */
9734 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9735 {
9736 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9737 	return dc_set_psr_allow_active(dm->dc, false);
9738 }
9739 
9740 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9741 {
9742 	struct amdgpu_device *adev = drm_to_adev(dev);
9743 	struct dc *dc = adev->dm.dc;
9744 	int i;
9745 
9746 	mutex_lock(&adev->dm.dc_lock);
9747 	if (dc->current_state) {
9748 		for (i = 0; i < dc->current_state->stream_count; ++i)
9749 			dc->current_state->streams[i]
9750 				->triggered_crtc_reset.enabled =
9751 				adev->dm.force_timing_sync;
9752 
9753 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9754 		dc_trigger_sync(dc, dc->current_state);
9755 	}
9756 	mutex_unlock(&adev->dm.dc_lock);
9757 }
9758 
9759 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9760 		       uint32_t value, const char *func_name)
9761 {
9762 #ifdef DM_CHECK_ADDR_0
9763 	if (address == 0) {
9764 		DC_ERR("invalid register write. address = 0\n");
9765 		return;
9766 	}
9767 #endif
9768 	cgs_write_register(ctx->cgs_device, address, value);
9769 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9770 }
9771 
9772 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9773 			  const char *func_name)
9774 {
9775 	uint32_t value;
9776 #ifdef DM_CHECK_ADDR_0
9777 	if (address == 0) {
9778 		DC_ERR("invalid register read; address = 0\n");
9779 		return 0;
9780 	}
9781 #endif
9782 
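	/*
	 * Register reads are not expected while writes are being gathered
	 * for DMUB offload (unless bursting); complain and return 0.
	 */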
9783 	if (ctx->dmub_srv &&
9784 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9785 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9786 		ASSERT(false);
9787 		return 0;
9788 	}
9789 
9790 	value = cgs_read_register(ctx->cgs_device, address);
9791 
9792 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9793 
9794 	return value;
9795 }
9796