xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 30adeee52d1ebadd8e4e594a54c7cf77250b91db)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41 
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53 
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61 #include "amdgpu_dm_psr.h"
62 
63 #include "ivsrcid/ivsrcid_vislands30.h"
64 
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
73 
74 #include <drm/drm_atomic.h>
75 #include <drm/drm_atomic_uapi.h>
76 #include <drm/drm_atomic_helper.h>
77 #include <drm/drm_dp_mst_helper.h>
78 #include <drm/drm_fb_helper.h>
79 #include <drm/drm_fourcc.h>
80 #include <drm/drm_edid.h>
81 #include <drm/drm_vblank.h>
82 #include <drm/drm_audio_component.h>
83 
84 #if defined(CONFIG_DRM_AMD_DC_DCN)
85 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
86 
87 #include "dcn/dcn_1_0_offset.h"
88 #include "dcn/dcn_1_0_sh_mask.h"
89 #include "soc15_hw_ip.h"
90 #include "vega10_ip_offset.h"
91 
92 #include "soc15_common.h"
93 #endif
94 
95 #include "modules/inc/mod_freesync.h"
96 #include "modules/power/power_helpers.h"
97 #include "modules/inc/mod_info_packet.h"
98 
99 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
101 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
103 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
105 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
107 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
109 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
111 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
113 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116 #endif
117 
118 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
119 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
120 
121 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
122 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
123 
124 /* Number of bytes in PSP header for firmware. */
125 #define PSP_HEADER_BYTES 0x100
126 
127 /* Number of bytes in PSP footer for firmware. */
128 #define PSP_FOOTER_BYTES 0x100
129 
130 /**
131  * DOC: overview
132  *
133  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
134  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
135  * requests into DC requests, and DC responses into DRM responses.
136  *
137  * The root control structure is &struct amdgpu_display_manager.
138  */
139 
140 /* basic init/fini API */
141 static int amdgpu_dm_init(struct amdgpu_device *adev);
142 static void amdgpu_dm_fini(struct amdgpu_device *adev);
143 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
144 
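/* Map the DP dongle type reported in DPCD onto the DRM subconnector type. */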
145 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
146 {
147 	switch (link->dpcd_caps.dongle_type) {
148 	case DISPLAY_DONGLE_NONE:
149 		return DRM_MODE_SUBCONNECTOR_Native;
150 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
151 		return DRM_MODE_SUBCONNECTOR_VGA;
152 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
153 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
154 		return DRM_MODE_SUBCONNECTOR_DVID;
155 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
156 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
157 		return DRM_MODE_SUBCONNECTOR_HDMIA;
158 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
159 	default:
160 		return DRM_MODE_SUBCONNECTOR_Unknown;
161 	}
162 }
163 
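/*
 * Keep the DP subconnector property in sync with the dongle/sink currently
 * detected on the link.
 */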
164 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
165 {
166 	struct dc_link *link = aconnector->dc_link;
167 	struct drm_connector *connector = &aconnector->base;
168 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
169 
170 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
171 		return;
172 
173 	if (aconnector->dc_sink)
174 		subconnector = get_subconnector_type(link);
175 
176 	drm_object_property_set_value(&connector->base,
177 			connector->dev->mode_config.dp_subconnector_property,
178 			subconnector);
179 }
180 
181 /*
182  * initializes drm_device display related structures, based on the information
183  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
184  * drm_encoder, drm_mode_config
185  *
186  * Returns 0 on success
187  */
188 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
189 /* removes and deallocates the drm structures, created by the above function */
190 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
191 
192 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
193 				struct drm_plane *plane,
194 				unsigned long possible_crtcs,
195 				const struct dc_plane_cap *plane_cap);
196 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
197 			       struct drm_plane *plane,
198 			       uint32_t link_index);
199 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
200 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
201 				    uint32_t link_index,
202 				    struct amdgpu_encoder *amdgpu_encoder);
203 static int amdgpu_dm_encoder_init(struct drm_device *dev,
204 				  struct amdgpu_encoder *aencoder,
205 				  uint32_t link_index);
206 
207 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
208 
209 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
210 
211 static int amdgpu_dm_atomic_check(struct drm_device *dev,
212 				  struct drm_atomic_state *state);
213 
214 static void handle_cursor_update(struct drm_plane *plane,
215 				 struct drm_plane_state *old_plane_state);
216 
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219 
220 static bool
221 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
222 				 struct drm_crtc_state *new_crtc_state);
223 /*
224  * dm_vblank_get_counter
225  *
226  * @brief
227  * Get counter for number of vertical blanks
228  *
229  * @param
230  * struct amdgpu_device *adev - [in] desired amdgpu device
231  * int crtc - [in] which CRTC to get the counter from
232  *
233  * @return
234  * Counter for vertical blanks
235  */
236 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
237 {
238 	if (crtc >= adev->mode_info.num_crtc)
239 		return 0;
240 	else {
241 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
242 
243 		if (acrtc->dm_irq_params.stream == NULL) {
244 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
245 				  crtc);
246 			return 0;
247 		}
248 
249 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
250 	}
251 }
252 
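/*
 * Query the current scanout position for a CRTC. *position packs the vertical
 * position in the low 16 bits and the horizontal position in the high 16 bits;
 * *vbl packs vblank start/end the same way.
 */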
253 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
254 				  u32 *vbl, u32 *position)
255 {
256 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
257 
258 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
259 		return -EINVAL;
260 	else {
261 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
262 
263 		if (acrtc->dm_irq_params.stream == NULL) {
264 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
265 				  crtc);
266 			return 0;
267 		}
268 
269 		/*
270 		 * TODO rework base driver to use values directly.
271 		 * for now parse it back into reg-format
272 		 */
273 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
274 					 &v_blank_start,
275 					 &v_blank_end,
276 					 &h_position,
277 					 &v_position);
278 
279 		*position = v_position | (h_position << 16);
280 		*vbl = v_blank_start | (v_blank_end << 16);
281 	}
282 
283 	return 0;
284 }
285 
286 static bool dm_is_idle(void *handle)
287 {
288 	/* XXX todo */
289 	return true;
290 }
291 
292 static int dm_wait_for_idle(void *handle)
293 {
294 	/* XXX todo */
295 	return 0;
296 }
297 
298 static bool dm_check_soft_reset(void *handle)
299 {
300 	return false;
301 }
302 
303 static int dm_soft_reset(void *handle)
304 {
305 	/* XXX todo */
306 	return 0;
307 }
308 
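/*
 * Find the amdgpu_crtc driven by the given OTG instance, or NULL if none
 * matches. An instance of -1 warns and falls back to CRTC 0.
 */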
309 static struct amdgpu_crtc *
310 get_crtc_by_otg_inst(struct amdgpu_device *adev,
311 		     int otg_inst)
312 {
313 	struct drm_device *dev = adev_to_drm(adev);
314 	struct drm_crtc *crtc;
315 	struct amdgpu_crtc *amdgpu_crtc;
316 
317 	if (WARN_ON(otg_inst == -1))
318 		return adev->mode_info.crtcs[0];
319 
320 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
321 		amdgpu_crtc = to_amdgpu_crtc(crtc);
322 
323 		if (amdgpu_crtc->otg_inst == otg_inst)
324 			return amdgpu_crtc;
325 	}
326 
327 	return NULL;
328 }
329 
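/*
 * Helpers to check whether variable refresh rate is active, from IRQ context
 * (interrupt params) or from a dm_crtc_state respectively.
 */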
330 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
331 {
332 	return acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_VARIABLE ||
334 	       acrtc->dm_irq_params.freesync_config.state ==
335 		       VRR_STATE_ACTIVE_FIXED;
336 }
337 
338 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
339 {
340 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
341 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
342 }
343 
344 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
345 					      struct dm_crtc_state *new_state)
346 {
347 	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
348 		return true;
349 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
350 		return true;
351 	else
352 		return false;
353 }
354 
355 /**
356  * dm_pflip_high_irq() - Handle pageflip interrupt
357  * @interrupt_params: used for determining the CRTC instance
358  *
359  * Handles the pageflip interrupt by notifying all interested parties
360  * that the pageflip has been completed.
361  */
362 static void dm_pflip_high_irq(void *interrupt_params)
363 {
364 	struct amdgpu_crtc *amdgpu_crtc;
365 	struct common_irq_params *irq_params = interrupt_params;
366 	struct amdgpu_device *adev = irq_params->adev;
367 	unsigned long flags;
368 	struct drm_pending_vblank_event *e;
369 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
370 	bool vrr_active;
371 
372 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
373 
374 	/* IRQ could occur when in initial stage */
375 	/* TODO work and BO cleanup */
376 	if (amdgpu_crtc == NULL) {
377 		DC_LOG_PFLIP("CRTC is null, returning.\n");
378 		return;
379 	}
380 
381 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
382 
383 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
384 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
385 						 amdgpu_crtc->pflip_status,
386 						 AMDGPU_FLIP_SUBMITTED,
387 						 amdgpu_crtc->crtc_id,
388 						 amdgpu_crtc);
389 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
390 		return;
391 	}
392 
393 	/* page flip completed. */
394 	e = amdgpu_crtc->event;
395 	amdgpu_crtc->event = NULL;
396 
397 	WARN_ON(!e);
398 
399 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
400 
401 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
402 	if (!vrr_active ||
403 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
404 				      &v_blank_end, &hpos, &vpos) ||
405 	    (vpos < v_blank_start)) {
406 		/* Update to correct count and vblank timestamp if racing with
407 		 * vblank irq. This also updates to the correct vblank timestamp
408 		 * even in VRR mode, as scanout is past the front-porch atm.
409 		 */
410 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
411 
412 		/* Wake up userspace by sending the pageflip event with proper
413 		 * count and timestamp of vblank of flip completion.
414 		 */
415 		if (e) {
416 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
417 
418 			/* Event sent, so done with vblank for this flip */
419 			drm_crtc_vblank_put(&amdgpu_crtc->base);
420 		}
421 	} else if (e) {
422 		/* VRR active and inside front-porch: vblank count and
423 		 * timestamp for pageflip event will only be up to date after
424 		 * drm_crtc_handle_vblank() has been executed from late vblank
425 		 * irq handler after start of back-porch (vline 0). We queue the
426 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
427 		 * updated timestamp and count, once it runs after us.
428 		 *
429 		 * We need to open-code this instead of using the helper
430 		 * drm_crtc_arm_vblank_event(), as that helper would
431 		 * call drm_crtc_accurate_vblank_count(), which we must
432 		 * not call in VRR mode while we are in front-porch!
433 		 */
434 
435 		/* sequence will be replaced by real count during send-out. */
436 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
437 		e->pipe = amdgpu_crtc->crtc_id;
438 
439 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
440 		e = NULL;
441 	}
442 
443 	/* Keep track of vblank of this flip for flip throttling. We use the
444 	 * cooked hw counter, as that one is incremented at the start of this vblank
445 	 * of pageflip completion, so last_flip_vblank is the forbidden count
446 	 * for queueing new pageflips if vsync + VRR is enabled.
447 	 */
448 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
449 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
450 
451 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
452 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
453 
454 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
455 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
456 		     vrr_active, (int) !e);
457 }
458 
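/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate and, when VRR is active, performs core
 * vblank handling after the end of the front-porch (plus BTR processing on
 * pre-DCE12 ASICs).
 */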
459 static void dm_vupdate_high_irq(void *interrupt_params)
460 {
461 	struct common_irq_params *irq_params = interrupt_params;
462 	struct amdgpu_device *adev = irq_params->adev;
463 	struct amdgpu_crtc *acrtc;
464 	struct drm_device *drm_dev;
465 	struct drm_vblank_crtc *vblank;
466 	ktime_t frame_duration_ns, previous_timestamp;
467 	unsigned long flags;
468 	int vrr_active;
469 
470 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
471 
472 	if (acrtc) {
473 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
474 		drm_dev = acrtc->base.dev;
475 		vblank = &drm_dev->vblank[acrtc->base.index];
476 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
477 		frame_duration_ns = vblank->time - previous_timestamp;
478 
479 		if (frame_duration_ns > 0) {
480 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
481 						frame_duration_ns,
482 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
483 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
484 		}
485 
486 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
487 			      acrtc->crtc_id,
488 			      vrr_active);
489 
490 		/* Core vblank handling is done here after end of front-porch in
491 		 * vrr mode, as vblank timestamping only gives valid results
492 		 * once the front-porch has ended. This will also deliver
493 		 * page-flip completion events that have been queued to us
494 		 * if a pageflip happened inside front-porch.
495 		 */
496 		if (vrr_active) {
497 			drm_crtc_handle_vblank(&acrtc->base);
498 
499 			/* BTR processing for pre-DCE12 ASICs */
500 			if (acrtc->dm_irq_params.stream &&
501 			    adev->family < AMDGPU_FAMILY_AI) {
502 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
503 				mod_freesync_handle_v_update(
504 				    adev->dm.freesync_module,
505 				    acrtc->dm_irq_params.stream,
506 				    &acrtc->dm_irq_params.vrr_params);
507 
508 				dc_stream_adjust_vmin_vmax(
509 				    adev->dm.dc,
510 				    acrtc->dm_irq_params.stream,
511 				    &acrtc->dm_irq_params.vrr_params.adjust);
512 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
513 			}
514 		}
515 	}
516 }
517 
518 /**
519  * dm_crtc_high_irq() - Handles CRTC interrupt
520  * @interrupt_params: used for determining the CRTC instance
521  *
522  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
523  * event handler.
524  */
525 static void dm_crtc_high_irq(void *interrupt_params)
526 {
527 	struct common_irq_params *irq_params = interrupt_params;
528 	struct amdgpu_device *adev = irq_params->adev;
529 	struct amdgpu_crtc *acrtc;
530 	unsigned long flags;
531 	int vrr_active;
532 
533 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
534 	if (!acrtc)
535 		return;
536 
537 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
538 
539 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
540 		      vrr_active, acrtc->dm_irq_params.active_planes);
541 
542 	/**
543 	 * Core vblank handling at start of front-porch is only possible
544 	 * in non-vrr mode, as only then does vblank timestamping give
545 	 * valid results while still inside the front-porch. Otherwise defer it
546 	 * to dm_vupdate_high_irq after end of front-porch.
547 	 */
548 	if (!vrr_active)
549 		drm_crtc_handle_vblank(&acrtc->base);
550 
551 	/**
552 	 * The following must happen at start of vblank, for crc
553 	 * computation and below-the-range btr support in vrr mode.
554 	 */
555 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
556 
557 	/* BTR updates need to happen before VUPDATE on Vega and above. */
558 	if (adev->family < AMDGPU_FAMILY_AI)
559 		return;
560 
561 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
562 
563 	if (acrtc->dm_irq_params.stream &&
564 	    acrtc->dm_irq_params.vrr_params.supported &&
565 	    acrtc->dm_irq_params.freesync_config.state ==
566 		    VRR_STATE_ACTIVE_VARIABLE) {
567 		mod_freesync_handle_v_update(adev->dm.freesync_module,
568 					     acrtc->dm_irq_params.stream,
569 					     &acrtc->dm_irq_params.vrr_params);
570 
571 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
572 					   &acrtc->dm_irq_params.vrr_params.adjust);
573 	}
574 
575 	/*
576 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
577 	 * In that case, pageflip completion interrupts won't fire and pageflip
578 	 * completion events won't get delivered. Prevent this by sending
579 	 * pending pageflip events from here if a flip is still pending.
580 	 *
581 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
582 	 * avoid race conditions between flip programming and completion,
583 	 * which could cause too early flip completion events.
584 	 */
585 	if (adev->family >= AMDGPU_FAMILY_RV &&
586 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
587 	    acrtc->dm_irq_params.active_planes == 0) {
588 		if (acrtc->event) {
589 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
590 			acrtc->event = NULL;
591 			drm_crtc_vblank_put(&acrtc->base);
592 		}
593 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
594 	}
595 
596 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
597 }
598 
599 #if defined(CONFIG_DRM_AMD_DC_DCN)
600 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
601 /**
602  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
603  * DCN generation ASICs
604  * @interrupt_params: interrupt parameters
605  *
606  * Used to set crc window/read out crc value at vertical line 0 position
607  */
608 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
609 {
610 	struct common_irq_params *irq_params = interrupt_params;
611 	struct amdgpu_device *adev = irq_params->adev;
612 	struct amdgpu_crtc *acrtc;
613 
614 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
615 
616 	if (!acrtc)
617 		return;
618 
619 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
620 }
621 #endif
622 
623 /**
624  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
625  * @interrupt_params: used for determining the Outbox instance
626  *
627  * Handles the Outbox interrupt by reading DMUB notifications and DMCUB
628  * trace buffer entries.
629  */
630 #define DMUB_TRACE_MAX_READ 64
631 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
632 {
633 	struct dmub_notification notify;
634 	struct common_irq_params *irq_params = interrupt_params;
635 	struct amdgpu_device *adev = irq_params->adev;
636 	struct amdgpu_display_manager *dm = &adev->dm;
637 	struct dmcub_trace_buf_entry entry = { 0 };
638 	uint32_t count = 0;
639 
640 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
641 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
642 			do {
643 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
644 			} while (notify.pending_notification);
645 
646 			if (adev->dm.dmub_notify)
647 				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
648 			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
649 				complete(&adev->dm.dmub_aux_transfer_done);
650 			// TODO : HPD Implementation
651 
652 		} else {
653 			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
654 		}
655 	}
656 
657 
658 	do {
659 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
660 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
661 							entry.param0, entry.param1);
662 
663 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
664 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
665 		} else
666 			break;
667 
668 		count++;
669 
670 	} while (count <= DMUB_TRACE_MAX_READ);
671 
672 	ASSERT(count <= DMUB_TRACE_MAX_READ);
673 }
674 #endif
675 
676 static int dm_set_clockgating_state(void *handle,
677 		  enum amd_clockgating_state state)
678 {
679 	return 0;
680 }
681 
682 static int dm_set_powergating_state(void *handle,
683 		  enum amd_powergating_state state)
684 {
685 	return 0;
686 }
687 
688 /* Prototypes of private functions */
689 static int dm_early_init(void* handle);
690 
691 /* Allocate memory for FBC compressed data  */
692 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
693 {
694 	struct drm_device *dev = connector->dev;
695 	struct amdgpu_device *adev = drm_to_adev(dev);
696 	struct dm_compressor_info *compressor = &adev->dm.compressor;
697 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
698 	struct drm_display_mode *mode;
699 	unsigned long max_size = 0;
700 
701 	if (adev->dm.dc->fbc_compressor == NULL)
702 		return;
703 
704 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
705 		return;
706 
707 	if (compressor->bo_ptr)
708 		return;
709 
710 
711 	list_for_each_entry(mode, &connector->modes, head) {
712 		if (max_size < mode->htotal * mode->vtotal)
713 			max_size = mode->htotal * mode->vtotal;
714 	}
715 
716 	if (max_size) {
717 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
718 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
719 			    &compressor->gpu_addr, &compressor->cpu_addr);
720 
721 		if (r)
722 			DRM_ERROR("DM: Failed to initialize FBC\n");
723 		else {
724 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
725 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
726 		}
727 
728 	}
729 
730 }
731 
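/*
 * drm_audio_component .get_eld() callback: copy the ELD of the connector
 * attached to the given audio pin into buf for the audio driver.
 */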
732 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
733 					  int pipe, bool *enabled,
734 					  unsigned char *buf, int max_bytes)
735 {
736 	struct drm_device *dev = dev_get_drvdata(kdev);
737 	struct amdgpu_device *adev = drm_to_adev(dev);
738 	struct drm_connector *connector;
739 	struct drm_connector_list_iter conn_iter;
740 	struct amdgpu_dm_connector *aconnector;
741 	int ret = 0;
742 
743 	*enabled = false;
744 
745 	mutex_lock(&adev->dm.audio_lock);
746 
747 	drm_connector_list_iter_begin(dev, &conn_iter);
748 	drm_for_each_connector_iter(connector, &conn_iter) {
749 		aconnector = to_amdgpu_dm_connector(connector);
750 		if (aconnector->audio_inst != port)
751 			continue;
752 
753 		*enabled = true;
754 		ret = drm_eld_size(connector->eld);
755 		memcpy(buf, connector->eld, min(max_bytes, ret));
756 
757 		break;
758 	}
759 	drm_connector_list_iter_end(&conn_iter);
760 
761 	mutex_unlock(&adev->dm.audio_lock);
762 
763 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
764 
765 	return ret;
766 }
767 
768 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
769 	.get_eld = amdgpu_dm_audio_component_get_eld,
770 };
771 
772 static int amdgpu_dm_audio_component_bind(struct device *kdev,
773 				       struct device *hda_kdev, void *data)
774 {
775 	struct drm_device *dev = dev_get_drvdata(kdev);
776 	struct amdgpu_device *adev = drm_to_adev(dev);
777 	struct drm_audio_component *acomp = data;
778 
779 	acomp->ops = &amdgpu_dm_audio_component_ops;
780 	acomp->dev = kdev;
781 	adev->dm.audio_component = acomp;
782 
783 	return 0;
784 }
785 
786 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
787 					  struct device *hda_kdev, void *data)
788 {
789 	struct drm_device *dev = dev_get_drvdata(kdev);
790 	struct amdgpu_device *adev = drm_to_adev(dev);
791 	struct drm_audio_component *acomp = data;
792 
793 	acomp->ops = NULL;
794 	acomp->dev = NULL;
795 	adev->dm.audio_component = NULL;
796 }
797 
798 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
799 	.bind	= amdgpu_dm_audio_component_bind,
800 	.unbind	= amdgpu_dm_audio_component_unbind,
801 };
802 
803 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
804 {
805 	int i, ret;
806 
807 	if (!amdgpu_audio)
808 		return 0;
809 
810 	adev->mode_info.audio.enabled = true;
811 
812 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
813 
814 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
815 		adev->mode_info.audio.pin[i].channels = -1;
816 		adev->mode_info.audio.pin[i].rate = -1;
817 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
818 		adev->mode_info.audio.pin[i].status_bits = 0;
819 		adev->mode_info.audio.pin[i].category_code = 0;
820 		adev->mode_info.audio.pin[i].connected = false;
821 		adev->mode_info.audio.pin[i].id =
822 			adev->dm.dc->res_pool->audios[i]->inst;
823 		adev->mode_info.audio.pin[i].offset = 0;
824 	}
825 
826 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
827 	if (ret < 0)
828 		return ret;
829 
830 	adev->dm.audio_registered = true;
831 
832 	return 0;
833 }
834 
835 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
836 {
837 	if (!amdgpu_audio)
838 		return;
839 
840 	if (!adev->mode_info.audio.enabled)
841 		return;
842 
843 	if (adev->dm.audio_registered) {
844 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
845 		adev->dm.audio_registered = false;
846 	}
847 
848 	/* TODO: Disable audio? */
849 
850 	adev->mode_info.audio.enabled = false;
851 }
852 
853 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
854 {
855 	struct drm_audio_component *acomp = adev->dm.audio_component;
856 
857 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
858 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
859 
860 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
861 						 pin, -1);
862 	}
863 }
864 
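/*
 * Copy the DMUB firmware and VBIOS into their framebuffer windows and bring
 * up the DMUB hardware and service. Returns 0 without doing anything when
 * the ASIC has no DMUB support.
 */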
865 static int dm_dmub_hw_init(struct amdgpu_device *adev)
866 {
867 	const struct dmcub_firmware_header_v1_0 *hdr;
868 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
869 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
870 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
871 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
872 	struct abm *abm = adev->dm.dc->res_pool->abm;
873 	struct dmub_srv_hw_params hw_params;
874 	enum dmub_status status;
875 	const unsigned char *fw_inst_const, *fw_bss_data;
876 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
877 	bool has_hw_support;
878 
879 	if (!dmub_srv)
880 		/* DMUB isn't supported on the ASIC. */
881 		return 0;
882 
883 	if (!fb_info) {
884 		DRM_ERROR("No framebuffer info for DMUB service.\n");
885 		return -EINVAL;
886 	}
887 
888 	if (!dmub_fw) {
889 		/* Firmware required for DMUB support. */
890 		DRM_ERROR("No firmware provided for DMUB.\n");
891 		return -EINVAL;
892 	}
893 
894 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
895 	if (status != DMUB_STATUS_OK) {
896 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
897 		return -EINVAL;
898 	}
899 
900 	if (!has_hw_support) {
901 		DRM_INFO("DMUB unsupported on ASIC\n");
902 		return 0;
903 	}
904 
905 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
906 
907 	fw_inst_const = dmub_fw->data +
908 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
909 			PSP_HEADER_BYTES;
910 
911 	fw_bss_data = dmub_fw->data +
912 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
913 		      le32_to_cpu(hdr->inst_const_bytes);
914 
915 	/* Copy firmware and bios info into FB memory. */
916 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
917 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
918 
919 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
920 
921 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
922 	 * amdgpu_ucode_init_single_fw will load dmub firmware
923 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
924 	 * will be done by dm_dmub_hw_init
925 	 */
926 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
927 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
928 				fw_inst_const_size);
929 	}
930 
931 	if (fw_bss_data_size)
932 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
933 		       fw_bss_data, fw_bss_data_size);
934 
935 	/* Copy firmware bios info into FB memory. */
936 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
937 	       adev->bios_size);
938 
939 	/* Reset regions that need to be reset. */
940 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
941 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
942 
943 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
944 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
945 
946 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
947 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
948 
949 	/* Initialize hardware. */
950 	memset(&hw_params, 0, sizeof(hw_params));
951 	hw_params.fb_base = adev->gmc.fb_start;
952 	hw_params.fb_offset = adev->gmc.aper_base;
953 
954 	/* backdoor load firmware and trigger dmub running */
955 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
956 		hw_params.load_inst_const = true;
957 
958 	if (dmcu)
959 		hw_params.psp_version = dmcu->psp_version;
960 
961 	for (i = 0; i < fb_info->num_fb; ++i)
962 		hw_params.fb[i] = &fb_info->fb[i];
963 
964 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
965 	if (status != DMUB_STATUS_OK) {
966 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
967 		return -EINVAL;
968 	}
969 
970 	/* Wait for firmware load to finish. */
971 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
972 	if (status != DMUB_STATUS_OK)
973 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
974 
975 	/* Init DMCU and ABM if available. */
976 	if (dmcu && abm) {
977 		dmcu->funcs->dmcu_init(dmcu);
978 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
979 	}
980 
981 	if (!adev->dm.dc->ctx->dmub_srv)
982 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
983 	if (!adev->dm.dc->ctx->dmub_srv) {
984 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
985 		return -ENOMEM;
986 	}
987 
988 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
989 		 adev->dm.dmcub_fw_version);
990 
991 	return 0;
992 }
993 
994 #if defined(CONFIG_DRM_AMD_DC_DCN)
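/*
 * Build the DC physical address space config (system/FB/AGP apertures and
 * GART page table location) from the device's GMC settings.
 */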
995 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
996 {
997 	uint64_t pt_base;
998 	uint32_t logical_addr_low;
999 	uint32_t logical_addr_high;
1000 	uint32_t agp_base, agp_bot, agp_top;
1001 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1002 
1003 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1004 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1005 
1006 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1007 		/*
1008 		 * Raven2 has a HW issue that it is unable to use the vram which
1009 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1010 		 * workaround that increase system aperture high address (add 1)
1011 		 * to get rid of the VM fault and hardware hang.
1012 		 */
1013 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1014 	else
1015 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1016 
1017 	agp_base = 0;
1018 	agp_bot = adev->gmc.agp_start >> 24;
1019 	agp_top = adev->gmc.agp_end >> 24;
1020 
1021 
1022 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1023 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1024 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1025 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1026 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1027 	page_table_base.low_part = lower_32_bits(pt_base);
1028 
1029 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1030 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1031 
1032 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1033 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1034 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1035 
1036 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1037 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1038 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1039 
1040 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1041 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1042 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1043 
1044 	pa_config->is_hvm_enabled = 0;
1045 
1046 }
1047 #endif
1048 #if defined(CONFIG_DRM_AMD_DC_DCN)
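/*
 * Deferred work that counts CRTCs with vblank interrupts enabled and only
 * allows DC idle (MALL) optimizations when that count drops to zero.
 */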
1049 static void event_mall_stutter(struct work_struct *work)
1050 {
1051 
1052 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1053 	struct amdgpu_display_manager *dm = vblank_work->dm;
1054 
1055 	mutex_lock(&dm->dc_lock);
1056 
1057 	if (vblank_work->enable)
1058 		dm->active_vblank_irq_count++;
1059 	else if (dm->active_vblank_irq_count)
1060 		dm->active_vblank_irq_count--;
1061 
1062 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1063 
1064 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1065 
1066 	mutex_unlock(&dm->dc_lock);
1067 }
1068 
1069 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1070 {
1071 
1072 	int max_caps = dc->caps.max_links;
1073 	struct vblank_workqueue *vblank_work;
1074 	int i = 0;
1075 
1076 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1077 	if (ZERO_OR_NULL_PTR(vblank_work)) {
1078 		kfree(vblank_work);
1079 		return NULL;
1080 	}
1081 
1082 	for (i = 0; i < max_caps; i++)
1083 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1084 
1085 	return vblank_work;
1086 }
1087 #endif
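/*
 * Core DM initialization: create the DC instance and bring up DMUB, freesync,
 * HDCP and the DRM-side display structures for this device.
 */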
1088 static int amdgpu_dm_init(struct amdgpu_device *adev)
1089 {
1090 	struct dc_init_data init_data;
1091 #ifdef CONFIG_DRM_AMD_DC_HDCP
1092 	struct dc_callback_init init_params;
1093 #endif
1094 	int r;
1095 
1096 	adev->dm.ddev = adev_to_drm(adev);
1097 	adev->dm.adev = adev;
1098 
1099 	/* Zero all the fields */
1100 	memset(&init_data, 0, sizeof(init_data));
1101 #ifdef CONFIG_DRM_AMD_DC_HDCP
1102 	memset(&init_params, 0, sizeof(init_params));
1103 #endif
1104 
1105 	mutex_init(&adev->dm.dc_lock);
1106 	mutex_init(&adev->dm.audio_lock);
1107 #if defined(CONFIG_DRM_AMD_DC_DCN)
1108 	spin_lock_init(&adev->dm.vblank_lock);
1109 #endif
1110 
1111 	if (amdgpu_dm_irq_init(adev)) {
1112 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1113 		goto error;
1114 	}
1115 
1116 	init_data.asic_id.chip_family = adev->family;
1117 
1118 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1119 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1120 
1121 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1122 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1123 	init_data.asic_id.atombios_base_address =
1124 		adev->mode_info.atom_context->bios;
1125 
1126 	init_data.driver = adev;
1127 
1128 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1129 
1130 	if (!adev->dm.cgs_device) {
1131 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1132 		goto error;
1133 	}
1134 
1135 	init_data.cgs_device = adev->dm.cgs_device;
1136 
1137 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1138 
1139 	switch (adev->asic_type) {
1140 	case CHIP_CARRIZO:
1141 	case CHIP_STONEY:
1142 	case CHIP_RAVEN:
1143 	case CHIP_RENOIR:
1144 		init_data.flags.gpu_vm_support = true;
1145 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1146 			init_data.flags.disable_dmcu = true;
1147 		break;
1148 #if defined(CONFIG_DRM_AMD_DC_DCN)
1149 	case CHIP_VANGOGH:
1150 		init_data.flags.gpu_vm_support = true;
1151 		break;
1152 #endif
1153 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
1154 	case CHIP_YELLOW_CARP:
1155 		init_data.flags.gpu_vm_support = true;
1156 		break;
1157 #endif
1158 	default:
1159 		break;
1160 	}
1161 
1162 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1163 		init_data.flags.fbc_support = true;
1164 
1165 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1166 		init_data.flags.multi_mon_pp_mclk_switch = true;
1167 
1168 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1169 		init_data.flags.disable_fractional_pwm = true;
1170 
1171 	init_data.flags.power_down_display_on_boot = true;
1172 
1173 	INIT_LIST_HEAD(&adev->dm.da_list);
1174 	/* Display Core create. */
1175 	adev->dm.dc = dc_create(&init_data);
1176 
1177 	if (adev->dm.dc) {
1178 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1179 	} else {
1180 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1181 		goto error;
1182 	}
1183 
1184 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1185 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1186 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1187 	}
1188 
1189 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1190 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1191 
1192 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1193 		adev->dm.dc->debug.disable_stutter = true;
1194 
1195 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1196 		adev->dm.dc->debug.disable_dsc = true;
1197 
1198 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1199 		adev->dm.dc->debug.disable_clock_gate = true;
1200 
1201 	r = dm_dmub_hw_init(adev);
1202 	if (r) {
1203 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1204 		goto error;
1205 	}
1206 
1207 	dc_hardware_init(adev->dm.dc);
1208 
1209 #if defined(CONFIG_DRM_AMD_DC_DCN)
1210 	if (adev->apu_flags) {
1211 		struct dc_phy_addr_space_config pa_config;
1212 
1213 		mmhub_read_system_context(adev, &pa_config);
1214 
1215 		// Call the DC init_memory func
1216 		dc_setup_system_context(adev->dm.dc, &pa_config);
1217 	}
1218 #endif
1219 
1220 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1221 	if (!adev->dm.freesync_module) {
1222 		DRM_ERROR(
1223 		"amdgpu: failed to initialize freesync_module.\n");
1224 	} else
1225 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1226 				adev->dm.freesync_module);
1227 
1228 	amdgpu_dm_init_color_mod();
1229 
1230 #if defined(CONFIG_DRM_AMD_DC_DCN)
1231 	if (adev->dm.dc->caps.max_links > 0) {
1232 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1233 
1234 		if (!adev->dm.vblank_workqueue)
1235 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1236 		else
1237 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1238 	}
1239 #endif
1240 
1241 #ifdef CONFIG_DRM_AMD_DC_HDCP
1242 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1243 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1244 
1245 		if (!adev->dm.hdcp_workqueue)
1246 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1247 		else
1248 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1249 
1250 		dc_init_callbacks(adev->dm.dc, &init_params);
1251 	}
1252 #endif
1253 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1254 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1255 #endif
1256 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1257 		init_completion(&adev->dm.dmub_aux_transfer_done);
1258 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1259 		if (!adev->dm.dmub_notify) {
1260 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1261 			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
1262 		}
1263 		amdgpu_dm_outbox_init(adev);
1264 	}
1265 
1266 	if (amdgpu_dm_initialize_drm_device(adev)) {
1267 		DRM_ERROR(
1268 		"amdgpu: failed to initialize sw for display support.\n");
1269 		goto error;
1270 	}
1271 
1272 	/* create fake encoders for MST */
1273 	dm_dp_create_fake_mst_encoders(adev);
1274 
1275 	/* TODO: Add_display_info? */
1276 
1277 	/* TODO use dynamic cursor width */
1278 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1279 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1280 
1281 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1282 		DRM_ERROR(
1283 		"amdgpu: failed to initialize sw for display support.\n");
1284 		goto error;
1285 	}
1286 
1287 
1288 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1289 
1290 	return 0;
1291 error:
1292 	amdgpu_dm_fini(adev);
1293 
1294 	return -EINVAL;
1295 }
1296 
1297 static int amdgpu_dm_early_fini(void *handle)
1298 {
1299 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1300 
1301 	amdgpu_dm_audio_fini(adev);
1302 
1303 	return 0;
1304 }
1305 
1306 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1307 {
1308 	int i;
1309 
1310 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1311 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1312 	}
1313 
1314 	amdgpu_dm_destroy_drm_device(&adev->dm);
1315 
1316 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1317 	if (adev->dm.crc_rd_wrk) {
1318 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1319 		kfree(adev->dm.crc_rd_wrk);
1320 		adev->dm.crc_rd_wrk = NULL;
1321 	}
1322 #endif
1323 #ifdef CONFIG_DRM_AMD_DC_HDCP
1324 	if (adev->dm.hdcp_workqueue) {
1325 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1326 		adev->dm.hdcp_workqueue = NULL;
1327 	}
1328 
1329 	if (adev->dm.dc)
1330 		dc_deinit_callbacks(adev->dm.dc);
1331 #endif
1332 
1333 #if defined(CONFIG_DRM_AMD_DC_DCN)
1334 	if (adev->dm.vblank_workqueue) {
1335 		adev->dm.vblank_workqueue->dm = NULL;
1336 		kfree(adev->dm.vblank_workqueue);
1337 		adev->dm.vblank_workqueue = NULL;
1338 	}
1339 #endif
1340 
1341 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1342 
1343 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1344 		kfree(adev->dm.dmub_notify);
1345 		adev->dm.dmub_notify = NULL;
1346 	}
1347 
1348 	if (adev->dm.dmub_bo)
1349 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1350 				      &adev->dm.dmub_bo_gpu_addr,
1351 				      &adev->dm.dmub_bo_cpu_addr);
1352 
1353 	/* DC Destroy TODO: Replace destroy DAL */
1354 	if (adev->dm.dc)
1355 		dc_destroy(&adev->dm.dc);
1356 	/*
1357 	 * TODO: pageflip, vblank interrupt
1358 	 *
1359 	 * amdgpu_dm_irq_fini(adev);
1360 	 */
1361 
1362 	if (adev->dm.cgs_device) {
1363 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1364 		adev->dm.cgs_device = NULL;
1365 	}
1366 	if (adev->dm.freesync_module) {
1367 		mod_freesync_destroy(adev->dm.freesync_module);
1368 		adev->dm.freesync_module = NULL;
1369 	}
1370 
1371 	mutex_destroy(&adev->dm.audio_lock);
1372 	mutex_destroy(&adev->dm.dc_lock);
1373 
1374 	return;
1375 }
1376 
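/*
 * Request the DMCU firmware for ASICs that need it and register it for PSP
 * loading; ASICs without a separate DMCU image simply return 0 here.
 */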
1377 static int load_dmcu_fw(struct amdgpu_device *adev)
1378 {
1379 	const char *fw_name_dmcu = NULL;
1380 	int r;
1381 	const struct dmcu_firmware_header_v1_0 *hdr;
1382 
1383 	switch (adev->asic_type) {
1384 #if defined(CONFIG_DRM_AMD_DC_SI)
1385 	case CHIP_TAHITI:
1386 	case CHIP_PITCAIRN:
1387 	case CHIP_VERDE:
1388 	case CHIP_OLAND:
1389 #endif
1390 	case CHIP_BONAIRE:
1391 	case CHIP_HAWAII:
1392 	case CHIP_KAVERI:
1393 	case CHIP_KABINI:
1394 	case CHIP_MULLINS:
1395 	case CHIP_TONGA:
1396 	case CHIP_FIJI:
1397 	case CHIP_CARRIZO:
1398 	case CHIP_STONEY:
1399 	case CHIP_POLARIS11:
1400 	case CHIP_POLARIS10:
1401 	case CHIP_POLARIS12:
1402 	case CHIP_VEGAM:
1403 	case CHIP_VEGA10:
1404 	case CHIP_VEGA12:
1405 	case CHIP_VEGA20:
1406 	case CHIP_NAVI10:
1407 	case CHIP_NAVI14:
1408 	case CHIP_RENOIR:
1409 	case CHIP_SIENNA_CICHLID:
1410 	case CHIP_NAVY_FLOUNDER:
1411 	case CHIP_DIMGREY_CAVEFISH:
1412 	case CHIP_BEIGE_GOBY:
1413 	case CHIP_VANGOGH:
1414 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
1415 	case CHIP_YELLOW_CARP:
1416 #endif
1417 		return 0;
1418 	case CHIP_NAVI12:
1419 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1420 		break;
1421 	case CHIP_RAVEN:
1422 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1423 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1424 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1425 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1426 		else
1427 			return 0;
1428 		break;
1429 	default:
1430 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1431 		return -EINVAL;
1432 	}
1433 
1434 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1435 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1436 		return 0;
1437 	}
1438 
1439 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1440 	if (r == -ENOENT) {
1441 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1442 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1443 		adev->dm.fw_dmcu = NULL;
1444 		return 0;
1445 	}
1446 	if (r) {
1447 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1448 			fw_name_dmcu);
1449 		return r;
1450 	}
1451 
1452 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1453 	if (r) {
1454 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1455 			fw_name_dmcu);
1456 		release_firmware(adev->dm.fw_dmcu);
1457 		adev->dm.fw_dmcu = NULL;
1458 		return r;
1459 	}
1460 
1461 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1462 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1463 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1464 	adev->firmware.fw_size +=
1465 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1466 
1467 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1468 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1469 	adev->firmware.fw_size +=
1470 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1471 
1472 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1473 
1474 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1475 
1476 	return 0;
1477 }
1478 
1479 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1480 {
1481 	struct amdgpu_device *adev = ctx;
1482 
1483 	return dm_read_reg(adev->dm.dc->ctx, address);
1484 }
1485 
1486 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1487 				     uint32_t value)
1488 {
1489 	struct amdgpu_device *adev = ctx;
1490 
1491 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1492 }
1493 
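/*
 * Software-side DMUB setup: select the firmware for this ASIC, create the
 * DMUB service, compute its region layout and allocate the backing
 * framebuffer memory.
 */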
1494 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1495 {
1496 	struct dmub_srv_create_params create_params;
1497 	struct dmub_srv_region_params region_params;
1498 	struct dmub_srv_region_info region_info;
1499 	struct dmub_srv_fb_params fb_params;
1500 	struct dmub_srv_fb_info *fb_info;
1501 	struct dmub_srv *dmub_srv;
1502 	const struct dmcub_firmware_header_v1_0 *hdr;
1503 	const char *fw_name_dmub;
1504 	enum dmub_asic dmub_asic;
1505 	enum dmub_status status;
1506 	int r;
1507 
1508 	switch (adev->asic_type) {
1509 	case CHIP_RENOIR:
1510 		dmub_asic = DMUB_ASIC_DCN21;
1511 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1512 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1513 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1514 		break;
1515 	case CHIP_SIENNA_CICHLID:
1516 		dmub_asic = DMUB_ASIC_DCN30;
1517 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1518 		break;
1519 	case CHIP_NAVY_FLOUNDER:
1520 		dmub_asic = DMUB_ASIC_DCN30;
1521 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1522 		break;
1523 	case CHIP_VANGOGH:
1524 		dmub_asic = DMUB_ASIC_DCN301;
1525 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1526 		break;
1527 	case CHIP_DIMGREY_CAVEFISH:
1528 		dmub_asic = DMUB_ASIC_DCN302;
1529 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1530 		break;
1531 	case CHIP_BEIGE_GOBY:
1532 		dmub_asic = DMUB_ASIC_DCN303;
1533 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1534 		break;
1535 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
1536 	case CHIP_YELLOW_CARP:
1537 		dmub_asic = DMUB_ASIC_DCN31;
1538 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1539 		break;
1540 #endif
1541 
1542 	default:
1543 		/* ASIC doesn't support DMUB. */
1544 		return 0;
1545 	}
1546 
1547 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1548 	if (r) {
1549 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1550 		return 0;
1551 	}
1552 
1553 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1554 	if (r) {
1555 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1556 		return 0;
1557 	}
1558 
1559 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1560 
1561 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1562 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1563 			AMDGPU_UCODE_ID_DMCUB;
1564 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1565 			adev->dm.dmub_fw;
1566 		adev->firmware.fw_size +=
1567 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1568 
1569 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1570 			 adev->dm.dmcub_fw_version);
1571 	}
1572 
1573 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1574 
1575 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1576 	dmub_srv = adev->dm.dmub_srv;
1577 
1578 	if (!dmub_srv) {
1579 		DRM_ERROR("Failed to allocate DMUB service!\n");
1580 		return -ENOMEM;
1581 	}
1582 
1583 	memset(&create_params, 0, sizeof(create_params));
1584 	create_params.user_ctx = adev;
1585 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1586 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1587 	create_params.asic = dmub_asic;
1588 
1589 	/* Create the DMUB service. */
1590 	status = dmub_srv_create(dmub_srv, &create_params);
1591 	if (status != DMUB_STATUS_OK) {
1592 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1593 		return -EINVAL;
1594 	}
1595 
1596 	/* Calculate the size of all the regions for the DMUB service. */
1597 	memset(&region_params, 0, sizeof(region_params));
1598 
1599 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1600 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1601 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1602 	region_params.vbios_size = adev->bios_size;
1603 	region_params.fw_bss_data = region_params.bss_data_size ?
1604 		adev->dm.dmub_fw->data +
1605 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1606 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1607 	region_params.fw_inst_const =
1608 		adev->dm.dmub_fw->data +
1609 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1610 		PSP_HEADER_BYTES;
1611 
1612 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1613 					   &region_info);
1614 
1615 	if (status != DMUB_STATUS_OK) {
1616 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1617 		return -EINVAL;
1618 	}
1619 
1620 	/*
1621 	 * Allocate a framebuffer based on the total size of all the regions.
1622 	 * TODO: Move this into GART.
1623 	 */
1624 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1625 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1626 				    &adev->dm.dmub_bo_gpu_addr,
1627 				    &adev->dm.dmub_bo_cpu_addr);
1628 	if (r)
1629 		return r;
1630 
1631 	/* Rebase the regions on the framebuffer address. */
1632 	memset(&fb_params, 0, sizeof(fb_params));
1633 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1634 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1635 	fb_params.region_info = &region_info;
1636 
1637 	adev->dm.dmub_fb_info =
1638 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1639 	fb_info = adev->dm.dmub_fb_info;
1640 
1641 	if (!fb_info) {
1642 		DRM_ERROR(
1643 			"Failed to allocate framebuffer info for DMUB service!\n");
1644 		return -ENOMEM;
1645 	}
1646 
1647 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1648 	if (status != DMUB_STATUS_OK) {
1649 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1650 		return -EINVAL;
1651 	}
1652 
1653 	return 0;
1654 }
1655 
1656 static int dm_sw_init(void *handle)
1657 {
1658 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1659 	int r;
1660 
1661 	r = dm_dmub_sw_init(adev);
1662 	if (r)
1663 		return r;
1664 
1665 	return load_dmcu_fw(adev);
1666 }
1667 
1668 static int dm_sw_fini(void *handle)
1669 {
1670 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1671 
1672 	kfree(adev->dm.dmub_fb_info);
1673 	adev->dm.dmub_fb_info = NULL;
1674 
1675 	if (adev->dm.dmub_srv) {
1676 		dmub_srv_destroy(adev->dm.dmub_srv);
1677 		adev->dm.dmub_srv = NULL;
1678 	}
1679 
1680 	release_firmware(adev->dm.dmub_fw);
1681 	adev->dm.dmub_fw = NULL;
1682 
1683 	release_firmware(adev->dm.fw_dmcu);
1684 	adev->dm.fw_dmcu = NULL;
1685 
1686 	return 0;
1687 }
1688 
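/*
 * Enable MST topology management on every connector whose link was detected
 * as an MST branch.
 */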
1689 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1690 {
1691 	struct amdgpu_dm_connector *aconnector;
1692 	struct drm_connector *connector;
1693 	struct drm_connector_list_iter iter;
1694 	int ret = 0;
1695 
1696 	drm_connector_list_iter_begin(dev, &iter);
1697 	drm_for_each_connector_iter(connector, &iter) {
1698 		aconnector = to_amdgpu_dm_connector(connector);
1699 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1700 		    aconnector->mst_mgr.aux) {
1701 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1702 					 aconnector,
1703 					 aconnector->base.base.id);
1704 
1705 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1706 			if (ret < 0) {
1707 				DRM_ERROR("DM_MST: Failed to start MST\n");
1708 				aconnector->dc_link->type =
1709 					dc_connection_single;
1710 				break;
1711 			}
1712 		}
1713 	}
1714 	drm_connector_list_iter_end(&iter);
1715 
1716 	return ret;
1717 }
1718 
1719 static int dm_late_init(void *handle)
1720 {
1721 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1722 
1723 	struct dmcu_iram_parameters params;
1724 	unsigned int linear_lut[16];
1725 	int i;
1726 	struct dmcu *dmcu = NULL;
1727 
1728 	dmcu = adev->dm.dc->res_pool->dmcu;
1729 
1730 	for (i = 0; i < 16; i++)
1731 		linear_lut[i] = 0xFFFF * i / 15;
1732 
1733 	params.set = 0;
1734 	params.backlight_ramping_start = 0xCCCC;
1735 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1736 	params.backlight_lut_array_size = 16;
1737 	params.backlight_lut_array = linear_lut;
1738 
1739 	/* Minimum backlight level after ABM reduction; don't allow below 1%:
1740 	 * 0xFFFF x 0.01 = 0x28F
1741 	 */
1742 	params.min_abm_backlight = 0x28F;
1743 	/* In the case where ABM is implemented on DMCUB,
1744 	 * the dmcu object will be NULL.
1745 	 * ABM 2.4 and up are implemented on DMCUB.
1746 	 */
1747 	if (dmcu) {
1748 		if (!dmcu_load_iram(dmcu, params))
1749 			return -EINVAL;
1750 	} else if (adev->dm.dc->ctx->dmub_srv) {
1751 		struct dc_link *edp_links[MAX_NUM_EDP];
1752 		int edp_num;
1753 
1754 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
1755 		for (i = 0; i < edp_num; i++) {
1756 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1757 				return -EINVAL;
1758 		}
1759 	}
1760 
1761 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1762 }
1763 
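/* Suspend or resume the MST topology managers of all MST root connectors.
 * If a manager fails to resume, tear its topology down and send a hotplug
 * event so userspace re-probes the outputs.
 */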
1764 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1765 {
1766 	struct amdgpu_dm_connector *aconnector;
1767 	struct drm_connector *connector;
1768 	struct drm_connector_list_iter iter;
1769 	struct drm_dp_mst_topology_mgr *mgr;
1770 	int ret;
1771 	bool need_hotplug = false;
1772 
1773 	drm_connector_list_iter_begin(dev, &iter);
1774 	drm_for_each_connector_iter(connector, &iter) {
1775 		aconnector = to_amdgpu_dm_connector(connector);
1776 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1777 		    aconnector->mst_port)
1778 			continue;
1779 
1780 		mgr = &aconnector->mst_mgr;
1781 
1782 		if (suspend) {
1783 			drm_dp_mst_topology_mgr_suspend(mgr);
1784 		} else {
1785 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1786 			if (ret < 0) {
1787 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1788 				need_hotplug = true;
1789 			}
1790 		}
1791 	}
1792 	drm_connector_list_iter_end(&iter);
1793 
1794 	if (need_hotplug)
1795 		drm_kms_helper_hotplug_event(dev);
1796 }
1797 
1798 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1799 {
1800 	struct smu_context *smu = &adev->smu;
1801 	int ret = 0;
1802 
1803 	if (!is_support_sw_smu(adev))
1804 		return 0;
1805 
1806 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1807 	 * on the Windows driver dc implementation.
1808 	 * For Navi1x, the clock settings of the dcn watermarks are fixed. The settings
1809 	 * should be passed to smu during boot up and on resume from s3.
1810 	 * Boot up: dc calculates the dcn watermark clock settings within dc_create,
1811 	 * dcn20_resource_construct,
1812 	 * then calls the pplib functions below to pass the settings to smu:
1813 	 * smu_set_watermarks_for_clock_ranges
1814 	 * smu_set_watermarks_table
1815 	 * navi10_set_watermarks_table
1816 	 * smu_write_watermarks_table
1817 	 *
1818 	 * For Renoir, the clock settings of the dcn watermarks are also fixed values.
1819 	 * dc implements a different flow for the Windows driver:
1820 	 * dc_hardware_init / dc_set_power_state
1821 	 * dcn10_init_hw
1822 	 * notify_wm_ranges
1823 	 * set_wm_ranges
1824 	 * -- Linux
1825 	 * smu_set_watermarks_for_clock_ranges
1826 	 * renoir_set_watermarks_table
1827 	 * smu_write_watermarks_table
1828 	 *
1829 	 * For Linux,
1830 	 * dc_hardware_init -> amdgpu_dm_init
1831 	 * dc_set_power_state --> dm_resume
1832 	 *
1833 	 * Therefore, this function applies to Navi10/12/14 but not Renoir.
1835 	 */
1836 	switch (adev->asic_type) {
1837 	case CHIP_NAVI10:
1838 	case CHIP_NAVI14:
1839 	case CHIP_NAVI12:
1840 		break;
1841 	default:
1842 		return 0;
1843 	}
1844 
1845 	ret = smu_write_watermarks_table(smu);
1846 	if (ret) {
1847 		DRM_ERROR("Failed to update WMTABLE!\n");
1848 		return ret;
1849 	}
1850 
1851 	return 0;
1852 }
1853 
1854 /**
1855  * dm_hw_init() - Initialize DC device
1856  * @handle: The base driver device containing the amdgpu_dm device.
1857  *
1858  * Initialize the &struct amdgpu_display_manager device. This involves calling
1859  * the initializers of each DM component, then populating the struct with them.
1860  *
1861  * Although the function implies hardware initialization, both hardware and
1862  * software are initialized here. Splitting them out to their relevant init
1863  * hooks is a future TODO item.
1864  *
1865  * Some notable things that are initialized here:
1866  *
1867  * - Display Core, both software and hardware
1868  * - DC modules that we need (freesync and color management)
1869  * - DRM software states
1870  * - Interrupt sources and handlers
1871  * - Vblank support
1872  * - Debug FS entries, if enabled
1873  */
1874 static int dm_hw_init(void *handle)
1875 {
1876 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1877 	/* Create DAL display manager */
1878 	amdgpu_dm_init(adev);
1879 	amdgpu_dm_hpd_init(adev);
1880 
1881 	return 0;
1882 }
1883 
1884 /**
1885  * dm_hw_fini() - Teardown DC device
1886  * @handle: The base driver device containing the amdgpu_dm device.
1887  *
1888  * Teardown components within &struct amdgpu_display_manager that require
1889  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1890  * were loaded. Also flush IRQ workqueues and disable them.
1891  */
1892 static int dm_hw_fini(void *handle)
1893 {
1894 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1895 
1896 	amdgpu_dm_hpd_fini(adev);
1897 
1898 	amdgpu_dm_irq_fini(adev);
1899 	amdgpu_dm_fini(adev);
1900 	return 0;
1901 }
1902 
1903 
1904 static int dm_enable_vblank(struct drm_crtc *crtc);
1905 static void dm_disable_vblank(struct drm_crtc *crtc);
1906 
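/* Enable or disable the pflip and vblank interrupts of every CRTC that is
 * currently driving a stream; used around GPU reset to quiesce and later
 * restore display interrupts.
 */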
1907 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1908 				 struct dc_state *state, bool enable)
1909 {
1910 	enum dc_irq_source irq_source;
1911 	struct amdgpu_crtc *acrtc;
1912 	int rc = -EBUSY;
1913 	int i = 0;
1914 
1915 	for (i = 0; i < state->stream_count; i++) {
1916 		acrtc = get_crtc_by_otg_inst(
1917 				adev, state->stream_status[i].primary_otg_inst);
1918 
1919 		if (acrtc && state->stream_status[i].plane_count != 0) {
1920 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1921 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1922 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1923 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1924 			if (rc)
1925 				DRM_WARN("Failed to %s pflip interrupts\n",
1926 					 enable ? "enable" : "disable");
1927 
1928 			if (enable) {
1929 				rc = dm_enable_vblank(&acrtc->base);
1930 				if (rc)
1931 					DRM_WARN("Failed to enable vblank interrupts\n");
1932 			} else {
1933 				dm_disable_vblank(&acrtc->base);
1934 			}
1935 
1936 		}
1937 	}
1938 
1939 }
1940 
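/* Commit a copy of the current DC state with every stream (and its planes)
 * removed, effectively blanking all displays; used while suspending for a
 * GPU reset.
 */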
1941 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1942 {
1943 	struct dc_state *context = NULL;
1944 	enum dc_status res = DC_ERROR_UNEXPECTED;
1945 	int i;
1946 	struct dc_stream_state *del_streams[MAX_PIPES];
1947 	int del_streams_count = 0;
1948 
1949 	memset(del_streams, 0, sizeof(del_streams));
1950 
1951 	context = dc_create_state(dc);
1952 	if (context == NULL)
1953 		goto context_alloc_fail;
1954 
1955 	dc_resource_state_copy_construct_current(dc, context);
1956 
1957 	/* First, remove all streams from the context */
1958 	for (i = 0; i < context->stream_count; i++) {
1959 		struct dc_stream_state *stream = context->streams[i];
1960 
1961 		del_streams[del_streams_count++] = stream;
1962 	}
1963 
1964 	/* Remove all planes for removed streams and then remove the streams */
1965 	for (i = 0; i < del_streams_count; i++) {
1966 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1967 			res = DC_FAIL_DETACH_SURFACES;
1968 			goto fail;
1969 		}
1970 
1971 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1972 		if (res != DC_OK)
1973 			goto fail;
1974 	}
1975 
1976 
1977 	res = dc_validate_global_state(dc, context, false);
1978 
1979 	if (res != DC_OK) {
1980 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1981 		goto fail;
1982 	}
1983 
1984 	res = dc_commit_state(dc, context);
1985 
1986 fail:
1987 	dc_release_state(context);
1988 
1989 context_alloc_fail:
1990 	return res;
1991 }
1992 
1993 static int dm_suspend(void *handle)
1994 {
1995 	struct amdgpu_device *adev = handle;
1996 	struct amdgpu_display_manager *dm = &adev->dm;
1997 	int ret = 0;
1998 
1999 	if (amdgpu_in_reset(adev)) {
2000 		mutex_lock(&dm->dc_lock);
2001 
2002 #if defined(CONFIG_DRM_AMD_DC_DCN)
2003 		dc_allow_idle_optimizations(adev->dm.dc, false);
2004 #endif
2005 
2006 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2007 
2008 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2009 
2010 		amdgpu_dm_commit_zero_streams(dm->dc);
2011 
2012 		amdgpu_dm_irq_suspend(adev);
2013 
2014 		return ret;
2015 	}
2016 
2017 	WARN_ON(adev->dm.cached_state);
2018 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2019 
2020 	s3_handle_mst(adev_to_drm(adev), true);
2021 
2022 	amdgpu_dm_irq_suspend(adev);
2023 
2024 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2025 
2026 	return 0;
2027 }
2028 
2029 static struct amdgpu_dm_connector *
2030 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2031 					     struct drm_crtc *crtc)
2032 {
2033 	uint32_t i;
2034 	struct drm_connector_state *new_con_state;
2035 	struct drm_connector *connector;
2036 	struct drm_crtc *crtc_from_state;
2037 
2038 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2039 		crtc_from_state = new_con_state->crtc;
2040 
2041 		if (crtc_from_state == crtc)
2042 			return to_amdgpu_dm_connector(connector);
2043 	}
2044 
2045 	return NULL;
2046 }
2047 
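/* Emulate a successful link detection for a forced connector: create a sink
 * matching the connector's signal type and read the local EDID, without
 * relying on a physical hotplug.
 */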
2048 static void emulated_link_detect(struct dc_link *link)
2049 {
2050 	struct dc_sink_init_data sink_init_data = { 0 };
2051 	struct display_sink_capability sink_caps = { 0 };
2052 	enum dc_edid_status edid_status;
2053 	struct dc_context *dc_ctx = link->ctx;
2054 	struct dc_sink *sink = NULL;
2055 	struct dc_sink *prev_sink = NULL;
2056 
2057 	link->type = dc_connection_none;
2058 	prev_sink = link->local_sink;
2059 
2060 	if (prev_sink)
2061 		dc_sink_release(prev_sink);
2062 
2063 	switch (link->connector_signal) {
2064 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2065 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2066 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2067 		break;
2068 	}
2069 
2070 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2071 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2072 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2073 		break;
2074 	}
2075 
2076 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2077 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2078 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2079 		break;
2080 	}
2081 
2082 	case SIGNAL_TYPE_LVDS: {
2083 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2084 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2085 		break;
2086 	}
2087 
2088 	case SIGNAL_TYPE_EDP: {
2089 		sink_caps.transaction_type =
2090 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2091 		sink_caps.signal = SIGNAL_TYPE_EDP;
2092 		break;
2093 	}
2094 
2095 	case SIGNAL_TYPE_DISPLAY_PORT: {
2096 		sink_caps.transaction_type =
2097 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2098 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2099 		break;
2100 	}
2101 
2102 	default:
2103 		DC_ERROR("Invalid connector type! signal:%d\n",
2104 			link->connector_signal);
2105 		return;
2106 	}
2107 
2108 	sink_init_data.link = link;
2109 	sink_init_data.sink_signal = sink_caps.signal;
2110 
2111 	sink = dc_sink_create(&sink_init_data);
2112 	if (!sink) {
2113 		DC_ERROR("Failed to create sink!\n");
2114 		return;
2115 	}
2116 
2117 	/* dc_sink_create returns a new reference */
2118 	link->local_sink = sink;
2119 
2120 	edid_status = dm_helpers_read_local_edid(
2121 			link->ctx,
2122 			link,
2123 			sink);
2124 
2125 	if (edid_status != EDID_OK)
2126 		DC_ERROR("Failed to read EDID");
2127 
2128 }
2129 
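/* Re-commit the cached streams and planes after a GPU reset, forcing a full
 * update on every surface so the display hardware is completely reprogrammed.
 */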
2130 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2131 				     struct amdgpu_display_manager *dm)
2132 {
2133 	struct {
2134 		struct dc_surface_update surface_updates[MAX_SURFACES];
2135 		struct dc_plane_info plane_infos[MAX_SURFACES];
2136 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2137 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2138 		struct dc_stream_update stream_update;
2139 	} *bundle;
2140 	int k, m;
2141 
2142 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2143 
2144 	if (!bundle) {
2145 		dm_error("Failed to allocate update bundle\n");
2146 		goto cleanup;
2147 	}
2148 
2149 	for (k = 0; k < dc_state->stream_count; k++) {
2150 		bundle->stream_update.stream = dc_state->streams[k];
2151 
2152 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2153 			bundle->surface_updates[m].surface =
2154 				dc_state->stream_status->plane_states[m];
2155 			bundle->surface_updates[m].surface->force_full_update =
2156 				true;
2157 		}
2158 		dc_commit_updates_for_stream(
2159 			dm->dc, bundle->surface_updates,
2160 			dc_state->stream_status->plane_count,
2161 			dc_state->streams[k], &bundle->stream_update, dc_state);
2162 	}
2163 
2164 cleanup:
2165 	kfree(bundle);
2166 
2167 	return;
2168 }
2169 
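/* Submit a minimal stream update that sets dpms_off for the stream currently
 * driven by the given link, turning the output off without a full modeset.
 */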
2170 static void dm_set_dpms_off(struct dc_link *link)
2171 {
2172 	struct dc_stream_state *stream_state;
2173 	struct amdgpu_dm_connector *aconnector = link->priv;
2174 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2175 	struct dc_stream_update stream_update;
2176 	bool dpms_off = true;
2177 
2178 	memset(&stream_update, 0, sizeof(stream_update));
2179 	stream_update.dpms_off = &dpms_off;
2180 
2181 	mutex_lock(&adev->dm.dc_lock);
2182 	stream_state = dc_stream_find_from_link(link);
2183 
2184 	if (stream_state == NULL) {
2185 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2186 		mutex_unlock(&adev->dm.dc_lock);
2187 		return;
2188 	}
2189 
2190 	stream_update.stream = stream_state;
2191 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2192 				     stream_state, &stream_update,
2193 				     stream_state->ctx->dc->current_state);
2194 	mutex_unlock(&adev->dm.dc_lock);
2195 }
2196 
2197 static int dm_resume(void *handle)
2198 {
2199 	struct amdgpu_device *adev = handle;
2200 	struct drm_device *ddev = adev_to_drm(adev);
2201 	struct amdgpu_display_manager *dm = &adev->dm;
2202 	struct amdgpu_dm_connector *aconnector;
2203 	struct drm_connector *connector;
2204 	struct drm_connector_list_iter iter;
2205 	struct drm_crtc *crtc;
2206 	struct drm_crtc_state *new_crtc_state;
2207 	struct dm_crtc_state *dm_new_crtc_state;
2208 	struct drm_plane *plane;
2209 	struct drm_plane_state *new_plane_state;
2210 	struct dm_plane_state *dm_new_plane_state;
2211 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2212 	enum dc_connection_type new_connection_type = dc_connection_none;
2213 	struct dc_state *dc_state;
2214 	int i, r, j;
2215 
2216 	if (amdgpu_in_reset(adev)) {
2217 		dc_state = dm->cached_dc_state;
2218 
2219 		r = dm_dmub_hw_init(adev);
2220 		if (r)
2221 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2222 
2223 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2224 		dc_resume(dm->dc);
2225 
2226 		amdgpu_dm_irq_resume_early(adev);
2227 
2228 		for (i = 0; i < dc_state->stream_count; i++) {
2229 			dc_state->streams[i]->mode_changed = true;
2230 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2231 				dc_state->stream_status->plane_states[j]->update_flags.raw
2232 					= 0xffffffff;
2233 			}
2234 		}
2235 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
2236 		/*
2237 		 * Resource allocation happens for link encoders on newer ASICs in
2238 		 * dc_validate_global_state, so we need to revalidate it here.
2239 		 *
2240 		 * This shouldn't fail (it passed once before), so warn if it does.
2241 		 */
2242 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2243 #endif
2244 
2245 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2246 
2247 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2248 
2249 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2250 
2251 		dc_release_state(dm->cached_dc_state);
2252 		dm->cached_dc_state = NULL;
2253 
2254 		amdgpu_dm_irq_resume_late(adev);
2255 
2256 		mutex_unlock(&dm->dc_lock);
2257 
2258 		return 0;
2259 	}
2260 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2261 	dc_release_state(dm_state->context);
2262 	dm_state->context = dc_create_state(dm->dc);
2263 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2264 	dc_resource_state_construct(dm->dc, dm_state->context);
2265 
2266 	/* Before powering on DC we need to re-initialize DMUB. */
2267 	r = dm_dmub_hw_init(adev);
2268 	if (r)
2269 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2270 
2271 	/* power on hardware */
2272 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2273 
2274 	/* program HPD filter */
2275 	dc_resume(dm->dc);
2276 
2277 	/*
2278 	 * early enable HPD Rx IRQ, should be done before set mode as short
2279 	 * pulse interrupts are used for MST
2280 	 */
2281 	amdgpu_dm_irq_resume_early(adev);
2282 
2283 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2284 	s3_handle_mst(ddev, false);
2285 
2286 	/* Do detection */
2287 	drm_connector_list_iter_begin(ddev, &iter);
2288 	drm_for_each_connector_iter(connector, &iter) {
2289 		aconnector = to_amdgpu_dm_connector(connector);
2290 
2291 		/*
2292 		 * This is the case when traversing through already created
2293 		 * MST connectors; these should be skipped.
2294 		 */
2295 		if (aconnector->mst_port)
2296 			continue;
2297 
2298 		mutex_lock(&aconnector->hpd_lock);
2299 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2300 			DRM_ERROR("KMS: Failed to detect connector\n");
2301 
2302 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2303 			emulated_link_detect(aconnector->dc_link);
2304 		else
2305 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2306 
2307 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2308 			aconnector->fake_enable = false;
2309 
2310 		if (aconnector->dc_sink)
2311 			dc_sink_release(aconnector->dc_sink);
2312 		aconnector->dc_sink = NULL;
2313 		amdgpu_dm_update_connector_after_detect(aconnector);
2314 		mutex_unlock(&aconnector->hpd_lock);
2315 	}
2316 	drm_connector_list_iter_end(&iter);
2317 
2318 	/* Force mode set in atomic commit */
2319 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2320 		new_crtc_state->active_changed = true;
2321 
2322 	/*
2323 	 * atomic_check is expected to create the dc states. We need to release
2324 	 * them here, since they were duplicated as part of the suspend
2325 	 * procedure.
2326 	 */
2327 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2328 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2329 		if (dm_new_crtc_state->stream) {
2330 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2331 			dc_stream_release(dm_new_crtc_state->stream);
2332 			dm_new_crtc_state->stream = NULL;
2333 		}
2334 	}
2335 
2336 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2337 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2338 		if (dm_new_plane_state->dc_state) {
2339 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2340 			dc_plane_state_release(dm_new_plane_state->dc_state);
2341 			dm_new_plane_state->dc_state = NULL;
2342 		}
2343 	}
2344 
2345 	drm_atomic_helper_resume(ddev, dm->cached_state);
2346 
2347 	dm->cached_state = NULL;
2348 
2349 	amdgpu_dm_irq_resume_late(adev);
2350 
2351 	amdgpu_dm_smu_write_watermarks_table(adev);
2352 
2353 	return 0;
2354 }
2355 
2356 /**
2357  * DOC: DM Lifecycle
2358  *
2359  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2360  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2361  * the base driver's device list to be initialized and torn down accordingly.
2362  *
2363  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2364  */
2365 
2366 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2367 	.name = "dm",
2368 	.early_init = dm_early_init,
2369 	.late_init = dm_late_init,
2370 	.sw_init = dm_sw_init,
2371 	.sw_fini = dm_sw_fini,
2372 	.early_fini = amdgpu_dm_early_fini,
2373 	.hw_init = dm_hw_init,
2374 	.hw_fini = dm_hw_fini,
2375 	.suspend = dm_suspend,
2376 	.resume = dm_resume,
2377 	.is_idle = dm_is_idle,
2378 	.wait_for_idle = dm_wait_for_idle,
2379 	.check_soft_reset = dm_check_soft_reset,
2380 	.soft_reset = dm_soft_reset,
2381 	.set_clockgating_state = dm_set_clockgating_state,
2382 	.set_powergating_state = dm_set_powergating_state,
2383 };
2384 
2385 const struct amdgpu_ip_block_version dm_ip_block =
2386 {
2387 	.type = AMD_IP_BLOCK_TYPE_DCE,
2388 	.major = 1,
2389 	.minor = 0,
2390 	.rev = 0,
2391 	.funcs = &amdgpu_dm_funcs,
2392 };
2393 
2394 
2395 /**
2396  * DOC: atomic
2397  *
2398  * *WIP*
2399  */
2400 
2401 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2402 	.fb_create = amdgpu_display_user_framebuffer_create,
2403 	.get_format_info = amd_get_format_info,
2404 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2405 	.atomic_check = amdgpu_dm_atomic_check,
2406 	.atomic_commit = drm_atomic_helper_commit,
2407 };
2408 
2409 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2410 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2411 };
2412 
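/* Derive the eDP backlight capabilities (AUX vs. PWM control and the
 * luminance range) from the sink's extended DPCD caps and the connector's
 * HDR metadata.
 */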
2413 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2414 {
2415 	u32 max_cll, min_cll, max, min, q, r;
2416 	struct amdgpu_dm_backlight_caps *caps;
2417 	struct amdgpu_display_manager *dm;
2418 	struct drm_connector *conn_base;
2419 	struct amdgpu_device *adev;
2420 	struct dc_link *link = NULL;
2421 	static const u8 pre_computed_values[] = {
2422 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2423 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2424 
2425 	if (!aconnector || !aconnector->dc_link)
2426 		return;
2427 
2428 	link = aconnector->dc_link;
2429 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2430 		return;
2431 
2432 	conn_base = &aconnector->base;
2433 	adev = drm_to_adev(conn_base->dev);
2434 	dm = &adev->dm;
2435 	caps = &dm->backlight_caps;
2436 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2437 	caps->aux_support = false;
2438 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2439 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2440 
2441 	if (caps->ext_caps->bits.oled == 1 ||
2442 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2443 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2444 		caps->aux_support = true;
2445 
2446 	if (amdgpu_backlight == 0)
2447 		caps->aux_support = false;
2448 	else if (amdgpu_backlight == 1)
2449 		caps->aux_support = true;
2450 
2451 	/* From the specification (CTA-861-G), for calculating the maximum
2452 	 * luminance we need to use:
2453 	 *	Luminance = 50*2**(CV/32)
2454 	 * Where CV is a one-byte value.
2455 	 * For calculating this expression we may need floating-point precision;
2456 	 * to avoid this complexity level, we take advantage of the fact that CV
2457 	 * is divided by a constant. From Euclid's division algorithm, we know
2458 	 * that CV can be written as: CV = 32*q + r. Next, we replace CV in the
2459 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2460 	 * need to pre-compute the value of r/32. To pre-compute the values
2461 	 * we used the following Ruby line:
2462 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2463 	 * The results of the expression above can be verified in
2464 	 * pre_computed_values.
2465 	 */
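	/* For example (illustrative values): max_cll = 65 gives q = 2 and r = 1,
	 * so max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204 nits.
	 */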
2466 	q = max_cll >> 5;
2467 	r = max_cll % 32;
2468 	max = (1 << q) * pre_computed_values[r];
2469 
2470 	// min luminance: maxLum * (CV/255)^2 / 100
2471 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2472 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2473 
2474 	caps->aux_max_input_signal = max;
2475 	caps->aux_min_input_signal = min;
2476 }
2477 
2478 void amdgpu_dm_update_connector_after_detect(
2479 		struct amdgpu_dm_connector *aconnector)
2480 {
2481 	struct drm_connector *connector = &aconnector->base;
2482 	struct drm_device *dev = connector->dev;
2483 	struct dc_sink *sink;
2484 
2485 	/* MST handled by drm_mst framework */
2486 	if (aconnector->mst_mgr.mst_state)
2487 		return;
2488 
2489 	sink = aconnector->dc_link->local_sink;
2490 	if (sink)
2491 		dc_sink_retain(sink);
2492 
2493 	/*
2494 	 * An EDID-managed connector gets its first update only in the mode_valid hook,
2495 	 * and then the connector sink is set to either the fake or the physical sink,
2496 	 * depending on the link status. Skip if this was already done during boot.
2497 	 */
2498 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2499 			&& aconnector->dc_em_sink) {
2500 
2501 		/*
2502 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake
2503 		 * a stream, because on resume connector->sink is set to NULL.
2504 		 */
2505 		mutex_lock(&dev->mode_config.mutex);
2506 
2507 		if (sink) {
2508 			if (aconnector->dc_sink) {
2509 				amdgpu_dm_update_freesync_caps(connector, NULL);
2510 				/*
2511 				 * The retain and release below are used to bump up the sink's
2512 				 * refcount, because the link no longer points to it after
2513 				 * disconnect; otherwise, on the next CRTC-to-connector reshuffle
2514 				 * by the UMD, we would get an unwanted dc_sink release.
2515 				 */
2516 				dc_sink_release(aconnector->dc_sink);
2517 			}
2518 			aconnector->dc_sink = sink;
2519 			dc_sink_retain(aconnector->dc_sink);
2520 			amdgpu_dm_update_freesync_caps(connector,
2521 					aconnector->edid);
2522 		} else {
2523 			amdgpu_dm_update_freesync_caps(connector, NULL);
2524 			if (!aconnector->dc_sink) {
2525 				aconnector->dc_sink = aconnector->dc_em_sink;
2526 				dc_sink_retain(aconnector->dc_sink);
2527 			}
2528 		}
2529 
2530 		mutex_unlock(&dev->mode_config.mutex);
2531 
2532 		if (sink)
2533 			dc_sink_release(sink);
2534 		return;
2535 	}
2536 
2537 	/*
2538 	 * TODO: temporary guard until a proper fix is found.
2539 	 * If this sink is an MST sink, we should not do anything.
2540 	 */
2541 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2542 		dc_sink_release(sink);
2543 		return;
2544 	}
2545 
2546 	if (aconnector->dc_sink == sink) {
2547 		/*
2548 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2549 		 * Do nothing!!
2550 		 */
2551 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2552 				aconnector->connector_id);
2553 		if (sink)
2554 			dc_sink_release(sink);
2555 		return;
2556 	}
2557 
2558 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2559 		aconnector->connector_id, aconnector->dc_sink, sink);
2560 
2561 	mutex_lock(&dev->mode_config.mutex);
2562 
2563 	/*
2564 	 * 1. Update status of the drm connector
2565 	 * 2. Send an event and let userspace tell us what to do
2566 	 */
2567 	if (sink) {
2568 		/*
2569 		 * TODO: check if we still need the S3 mode update workaround.
2570 		 * If yes, put it here.
2571 		 */
2572 		if (aconnector->dc_sink) {
2573 			amdgpu_dm_update_freesync_caps(connector, NULL);
2574 			dc_sink_release(aconnector->dc_sink);
2575 		}
2576 
2577 		aconnector->dc_sink = sink;
2578 		dc_sink_retain(aconnector->dc_sink);
2579 		if (sink->dc_edid.length == 0) {
2580 			aconnector->edid = NULL;
2581 			if (aconnector->dc_link->aux_mode) {
2582 				drm_dp_cec_unset_edid(
2583 					&aconnector->dm_dp_aux.aux);
2584 			}
2585 		} else {
2586 			aconnector->edid =
2587 				(struct edid *)sink->dc_edid.raw_edid;
2588 
2589 			drm_connector_update_edid_property(connector,
2590 							   aconnector->edid);
2591 			if (aconnector->dc_link->aux_mode)
2592 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2593 						    aconnector->edid);
2594 		}
2595 
2596 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2597 		update_connector_ext_caps(aconnector);
2598 	} else {
2599 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2600 		amdgpu_dm_update_freesync_caps(connector, NULL);
2601 		drm_connector_update_edid_property(connector, NULL);
2602 		aconnector->num_modes = 0;
2603 		dc_sink_release(aconnector->dc_sink);
2604 		aconnector->dc_sink = NULL;
2605 		aconnector->edid = NULL;
2606 #ifdef CONFIG_DRM_AMD_DC_HDCP
2607 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2608 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2609 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2610 #endif
2611 	}
2612 
2613 	mutex_unlock(&dev->mode_config.mutex);
2614 
2615 	update_subconnector_property(aconnector);
2616 
2617 	if (sink)
2618 		dc_sink_release(sink);
2619 }
2620 
2621 static void handle_hpd_irq(void *param)
2622 {
2623 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2624 	struct drm_connector *connector = &aconnector->base;
2625 	struct drm_device *dev = connector->dev;
2626 	enum dc_connection_type new_connection_type = dc_connection_none;
2627 	struct amdgpu_device *adev = drm_to_adev(dev);
2628 #ifdef CONFIG_DRM_AMD_DC_HDCP
2629 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2630 #endif
2631 
2632 	if (adev->dm.disable_hpd_irq)
2633 		return;
2634 
2635 	/*
2636 	 * In case of failure, or for MST, there is no need to update the connector
2637 	 * status or notify the OS, since (in the MST case) MST does this in its own context.
2638 	 */
2639 	mutex_lock(&aconnector->hpd_lock);
2640 
2641 #ifdef CONFIG_DRM_AMD_DC_HDCP
2642 	if (adev->dm.hdcp_workqueue) {
2643 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2644 		dm_con_state->update_hdcp = true;
2645 	}
2646 #endif
2647 	if (aconnector->fake_enable)
2648 		aconnector->fake_enable = false;
2649 
2650 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2651 		DRM_ERROR("KMS: Failed to detect connector\n");
2652 
2653 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2654 		emulated_link_detect(aconnector->dc_link);
2655 
2656 
2657 		drm_modeset_lock_all(dev);
2658 		dm_restore_drm_connector_state(dev, connector);
2659 		drm_modeset_unlock_all(dev);
2660 
2661 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2662 			drm_kms_helper_hotplug_event(dev);
2663 
2664 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2665 		if (new_connection_type == dc_connection_none &&
2666 		    aconnector->dc_link->type == dc_connection_none)
2667 			dm_set_dpms_off(aconnector->dc_link);
2668 
2669 		amdgpu_dm_update_connector_after_detect(aconnector);
2670 
2671 		drm_modeset_lock_all(dev);
2672 		dm_restore_drm_connector_state(dev, connector);
2673 		drm_modeset_unlock_all(dev);
2674 
2675 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2676 			drm_kms_helper_hotplug_event(dev);
2677 	}
2678 	mutex_unlock(&aconnector->hpd_lock);
2679 
2680 }
2681 
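/* Service the MST-related bits of a DP short-pulse (HPD RX) interrupt by
 * reading the sink's ESI registers and forwarding them to the MST manager.
 */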
2682 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2683 {
2684 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2685 	uint8_t dret;
2686 	bool new_irq_handled = false;
2687 	int dpcd_addr;
2688 	int dpcd_bytes_to_read;
2689 
2690 	const int max_process_count = 30;
2691 	int process_count = 0;
2692 
2693 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2694 
2695 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2696 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2697 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2698 		dpcd_addr = DP_SINK_COUNT;
2699 	} else {
2700 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2701 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2702 		dpcd_addr = DP_SINK_COUNT_ESI;
2703 	}
2704 
2705 	dret = drm_dp_dpcd_read(
2706 		&aconnector->dm_dp_aux.aux,
2707 		dpcd_addr,
2708 		esi,
2709 		dpcd_bytes_to_read);
2710 
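	/* Keep servicing the sink's ESI vector until it stops reporting new
	 * IRQs or the retry cap is hit, ACKing each handled request over DPCD.
	 */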
2711 	while (dret == dpcd_bytes_to_read &&
2712 		process_count < max_process_count) {
2713 		uint8_t retry;
2714 		dret = 0;
2715 
2716 		process_count++;
2717 
2718 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2719 		/* handle HPD short pulse irq */
2720 		if (aconnector->mst_mgr.mst_state)
2721 			drm_dp_mst_hpd_irq(
2722 				&aconnector->mst_mgr,
2723 				esi,
2724 				&new_irq_handled);
2725 
2726 		if (new_irq_handled) {
2727 			/* ACK at DPCD to notify downstream */
2728 			const int ack_dpcd_bytes_to_write =
2729 				dpcd_bytes_to_read - 1;
2730 
2731 			for (retry = 0; retry < 3; retry++) {
2732 				uint8_t wret;
2733 
2734 				wret = drm_dp_dpcd_write(
2735 					&aconnector->dm_dp_aux.aux,
2736 					dpcd_addr + 1,
2737 					&esi[1],
2738 					ack_dpcd_bytes_to_write);
2739 				if (wret == ack_dpcd_bytes_to_write)
2740 					break;
2741 			}
2742 
2743 			/* check if there is new irq to be handled */
2744 			dret = drm_dp_dpcd_read(
2745 				&aconnector->dm_dp_aux.aux,
2746 				dpcd_addr,
2747 				esi,
2748 				dpcd_bytes_to_read);
2749 
2750 			new_irq_handled = false;
2751 		} else {
2752 			break;
2753 		}
2754 	}
2755 
2756 	if (process_count == max_process_count)
2757 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2758 }
2759 
2760 static void handle_hpd_rx_irq(void *param)
2761 {
2762 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2763 	struct drm_connector *connector = &aconnector->base;
2764 	struct drm_device *dev = connector->dev;
2765 	struct dc_link *dc_link = aconnector->dc_link;
2766 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2767 	bool result = false;
2768 	enum dc_connection_type new_connection_type = dc_connection_none;
2769 	struct amdgpu_device *adev = drm_to_adev(dev);
2770 	union hpd_irq_data hpd_irq_data;
2771 	bool lock_flag = false;
2772 
2773 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2774 
2775 	if (adev->dm.disable_hpd_irq)
2776 		return;
2777 
2778 
2779 	/*
2780 	 * TODO: Temporarily add a mutex to protect the HPD interrupt from GPIO
2781 	 * conflicts; once an i2c helper is implemented, this mutex should be
2782 	 * retired.
2783 	 */
2784 	mutex_lock(&aconnector->hpd_lock);
2785 
2786 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2787 
2788 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2789 		(dc_link->type == dc_connection_mst_branch)) {
2790 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2791 			result = true;
2792 			dm_handle_hpd_rx_irq(aconnector);
2793 			goto out;
2794 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2795 			result = false;
2796 			dm_handle_hpd_rx_irq(aconnector);
2797 			goto out;
2798 		}
2799 	}
2800 
2801 	/*
2802 	 * TODO: We need the lock to avoid touching DC state while it's being
2803 	 * modified during automated compliance testing, or when link loss
2804 	 * happens. While this should be split into subhandlers and proper
2805 	 * interfaces to avoid having to conditionally lock like this in the
2806 	 * outer layer, we need this workaround temporarily to allow MST
2807 	 * lightup in some scenarios to avoid timeout.
2808 	 */
2809 	if (!amdgpu_in_reset(adev) &&
2810 	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2811 	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2812 		mutex_lock(&adev->dm.dc_lock);
2813 		lock_flag = 1;
2814 	}
2815 
2816 #ifdef CONFIG_DRM_AMD_DC_HDCP
2817 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2818 #else
2819 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2820 #endif
2821 	if (!amdgpu_in_reset(adev) && lock_flag)
2822 		mutex_unlock(&adev->dm.dc_lock);
2823 
2824 out:
2825 	if (result && !is_mst_root_connector) {
2826 		/* Downstream Port status changed. */
2827 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2828 			DRM_ERROR("KMS: Failed to detect connector\n");
2829 
2830 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2831 			emulated_link_detect(dc_link);
2832 
2833 			if (aconnector->fake_enable)
2834 				aconnector->fake_enable = false;
2835 
2836 			amdgpu_dm_update_connector_after_detect(aconnector);
2837 
2838 
2839 			drm_modeset_lock_all(dev);
2840 			dm_restore_drm_connector_state(dev, connector);
2841 			drm_modeset_unlock_all(dev);
2842 
2843 			drm_kms_helper_hotplug_event(dev);
2844 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2845 
2846 			if (aconnector->fake_enable)
2847 				aconnector->fake_enable = false;
2848 
2849 			amdgpu_dm_update_connector_after_detect(aconnector);
2850 
2851 
2852 			drm_modeset_lock_all(dev);
2853 			dm_restore_drm_connector_state(dev, connector);
2854 			drm_modeset_unlock_all(dev);
2855 
2856 			drm_kms_helper_hotplug_event(dev);
2857 		}
2858 	}
2859 #ifdef CONFIG_DRM_AMD_DC_HDCP
2860 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2861 		if (adev->dm.hdcp_workqueue)
2862 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2863 	}
2864 #endif
2865 
2866 	if (dc_link->type != dc_connection_mst_branch)
2867 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2868 
2869 	mutex_unlock(&aconnector->hpd_lock);
2870 }
2871 
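/* Walk the connector list and hook the HPD and HPD RX (DP short pulse) DC
 * interrupt sources up to their low-context handlers.
 */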
2872 static void register_hpd_handlers(struct amdgpu_device *adev)
2873 {
2874 	struct drm_device *dev = adev_to_drm(adev);
2875 	struct drm_connector *connector;
2876 	struct amdgpu_dm_connector *aconnector;
2877 	const struct dc_link *dc_link;
2878 	struct dc_interrupt_params int_params = {0};
2879 
2880 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2881 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2882 
2883 	list_for_each_entry(connector,
2884 			&dev->mode_config.connector_list, head) {
2885 
2886 		aconnector = to_amdgpu_dm_connector(connector);
2887 		dc_link = aconnector->dc_link;
2888 
2889 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2890 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2891 			int_params.irq_source = dc_link->irq_source_hpd;
2892 
2893 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2894 					handle_hpd_irq,
2895 					(void *) aconnector);
2896 		}
2897 
2898 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2899 
2900 			/* Also register for DP short pulse (hpd_rx). */
2901 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2902 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2903 
2904 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2905 					handle_hpd_rx_irq,
2906 					(void *) aconnector);
2907 		}
2908 	}
2909 }
2910 
2911 #if defined(CONFIG_DRM_AMD_DC_SI)
2912 /* Register IRQ sources and initialize IRQ callbacks */
2913 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2914 {
2915 	struct dc *dc = adev->dm.dc;
2916 	struct common_irq_params *c_irq_params;
2917 	struct dc_interrupt_params int_params = {0};
2918 	int r;
2919 	int i;
2920 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2921 
2922 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2923 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2924 
2925 	/*
2926 	 * Actions of amdgpu_irq_add_id():
2927 	 * 1. Register a set() function with base driver.
2928 	 *    Base driver will call set() function to enable/disable an
2929 	 *    interrupt in DC hardware.
2930 	 * 2. Register amdgpu_dm_irq_handler().
2931 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2932 	 *    coming from DC hardware.
2933 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2934 	 *    for acknowledging and handling. */
2935 
2936 	/* Use VBLANK interrupt */
2937 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2938 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2939 		if (r) {
2940 			DRM_ERROR("Failed to add crtc irq id!\n");
2941 			return r;
2942 		}
2943 
2944 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2945 		int_params.irq_source =
2946 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2947 
2948 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2949 
2950 		c_irq_params->adev = adev;
2951 		c_irq_params->irq_src = int_params.irq_source;
2952 
2953 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2954 				dm_crtc_high_irq, c_irq_params);
2955 	}
2956 
2957 	/* Use GRPH_PFLIP interrupt */
2958 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2959 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2960 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2961 		if (r) {
2962 			DRM_ERROR("Failed to add page flip irq id!\n");
2963 			return r;
2964 		}
2965 
2966 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2967 		int_params.irq_source =
2968 			dc_interrupt_to_irq_source(dc, i, 0);
2969 
2970 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2971 
2972 		c_irq_params->adev = adev;
2973 		c_irq_params->irq_src = int_params.irq_source;
2974 
2975 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2976 				dm_pflip_high_irq, c_irq_params);
2977 
2978 	}
2979 
2980 	/* HPD */
2981 	r = amdgpu_irq_add_id(adev, client_id,
2982 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2983 	if (r) {
2984 		DRM_ERROR("Failed to add hpd irq id!\n");
2985 		return r;
2986 	}
2987 
2988 	register_hpd_handlers(adev);
2989 
2990 	return 0;
2991 }
2992 #endif
2993 
2994 /* Register IRQ sources and initialize IRQ callbacks */
2995 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2996 {
2997 	struct dc *dc = adev->dm.dc;
2998 	struct common_irq_params *c_irq_params;
2999 	struct dc_interrupt_params int_params = {0};
3000 	int r;
3001 	int i;
3002 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3003 
3004 	if (adev->asic_type >= CHIP_VEGA10)
3005 		client_id = SOC15_IH_CLIENTID_DCE;
3006 
3007 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3008 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3009 
3010 	/*
3011 	 * Actions of amdgpu_irq_add_id():
3012 	 * 1. Register a set() function with base driver.
3013 	 *    Base driver will call set() function to enable/disable an
3014 	 *    interrupt in DC hardware.
3015 	 * 2. Register amdgpu_dm_irq_handler().
3016 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3017 	 *    coming from DC hardware.
3018 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3019 	 *    for acknowledging and handling. */
3020 
3021 	/* Use VBLANK interrupt */
3022 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3023 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3024 		if (r) {
3025 			DRM_ERROR("Failed to add crtc irq id!\n");
3026 			return r;
3027 		}
3028 
3029 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3030 		int_params.irq_source =
3031 			dc_interrupt_to_irq_source(dc, i, 0);
3032 
3033 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3034 
3035 		c_irq_params->adev = adev;
3036 		c_irq_params->irq_src = int_params.irq_source;
3037 
3038 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3039 				dm_crtc_high_irq, c_irq_params);
3040 	}
3041 
3042 	/* Use VUPDATE interrupt */
3043 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3044 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3045 		if (r) {
3046 			DRM_ERROR("Failed to add vupdate irq id!\n");
3047 			return r;
3048 		}
3049 
3050 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3051 		int_params.irq_source =
3052 			dc_interrupt_to_irq_source(dc, i, 0);
3053 
3054 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3055 
3056 		c_irq_params->adev = adev;
3057 		c_irq_params->irq_src = int_params.irq_source;
3058 
3059 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3060 				dm_vupdate_high_irq, c_irq_params);
3061 	}
3062 
3063 	/* Use GRPH_PFLIP interrupt */
3064 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3065 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3066 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3067 		if (r) {
3068 			DRM_ERROR("Failed to add page flip irq id!\n");
3069 			return r;
3070 		}
3071 
3072 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3073 		int_params.irq_source =
3074 			dc_interrupt_to_irq_source(dc, i, 0);
3075 
3076 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3077 
3078 		c_irq_params->adev = adev;
3079 		c_irq_params->irq_src = int_params.irq_source;
3080 
3081 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3082 				dm_pflip_high_irq, c_irq_params);
3083 
3084 	}
3085 
3086 	/* HPD */
3087 	r = amdgpu_irq_add_id(adev, client_id,
3088 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3089 	if (r) {
3090 		DRM_ERROR("Failed to add hpd irq id!\n");
3091 		return r;
3092 	}
3093 
3094 	register_hpd_handlers(adev);
3095 
3096 	return 0;
3097 }
3098 
3099 #if defined(CONFIG_DRM_AMD_DC_DCN)
3100 /* Register IRQ sources and initialize IRQ callbacks */
3101 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3102 {
3103 	struct dc *dc = adev->dm.dc;
3104 	struct common_irq_params *c_irq_params;
3105 	struct dc_interrupt_params int_params = {0};
3106 	int r;
3107 	int i;
3108 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3109 	static const unsigned int vrtl_int_srcid[] = {
3110 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3111 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3112 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3113 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3114 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3115 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3116 	};
3117 #endif
3118 
3119 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3120 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3121 
3122 	/*
3123 	 * Actions of amdgpu_irq_add_id():
3124 	 * 1. Register a set() function with base driver.
3125 	 *    Base driver will call set() function to enable/disable an
3126 	 *    interrupt in DC hardware.
3127 	 * 2. Register amdgpu_dm_irq_handler().
3128 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3129 	 *    coming from DC hardware.
3130 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3131 	 *    for acknowledging and handling.
3132 	 */
3133 
3134 	/* Use VSTARTUP interrupt */
3135 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3136 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3137 			i++) {
3138 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3139 
3140 		if (r) {
3141 			DRM_ERROR("Failed to add crtc irq id!\n");
3142 			return r;
3143 		}
3144 
3145 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3146 		int_params.irq_source =
3147 			dc_interrupt_to_irq_source(dc, i, 0);
3148 
3149 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3150 
3151 		c_irq_params->adev = adev;
3152 		c_irq_params->irq_src = int_params.irq_source;
3153 
3154 		amdgpu_dm_irq_register_interrupt(
3155 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3156 	}
3157 
3158 	/* Use otg vertical line interrupt */
3159 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3160 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3161 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3162 				vrtl_int_srcid[i], &adev->vline0_irq);
3163 
3164 		if (r) {
3165 			DRM_ERROR("Failed to add vline0 irq id!\n");
3166 			return r;
3167 		}
3168 
3169 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3170 		int_params.irq_source =
3171 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3172 
3173 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3174 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3175 			break;
3176 		}
3177 
3178 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3179 					- DC_IRQ_SOURCE_DC1_VLINE0];
3180 
3181 		c_irq_params->adev = adev;
3182 		c_irq_params->irq_src = int_params.irq_source;
3183 
3184 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3185 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3186 	}
3187 #endif
3188 
3189 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3190 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3191 	 * to trigger at end of each vblank, regardless of state of the lock,
3192 	 * matching DCE behaviour.
3193 	 */
3194 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3195 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3196 	     i++) {
3197 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3198 
3199 		if (r) {
3200 			DRM_ERROR("Failed to add vupdate irq id!\n");
3201 			return r;
3202 		}
3203 
3204 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3205 		int_params.irq_source =
3206 			dc_interrupt_to_irq_source(dc, i, 0);
3207 
3208 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3209 
3210 		c_irq_params->adev = adev;
3211 		c_irq_params->irq_src = int_params.irq_source;
3212 
3213 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3214 				dm_vupdate_high_irq, c_irq_params);
3215 	}
3216 
3217 	/* Use GRPH_PFLIP interrupt */
3218 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3219 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3220 			i++) {
3221 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3222 		if (r) {
3223 			DRM_ERROR("Failed to add page flip irq id!\n");
3224 			return r;
3225 		}
3226 
3227 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3228 		int_params.irq_source =
3229 			dc_interrupt_to_irq_source(dc, i, 0);
3230 
3231 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3232 
3233 		c_irq_params->adev = adev;
3234 		c_irq_params->irq_src = int_params.irq_source;
3235 
3236 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3237 				dm_pflip_high_irq, c_irq_params);
3238 
3239 	}
3240 
3241 	/* HPD */
3242 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3243 			&adev->hpd_irq);
3244 	if (r) {
3245 		DRM_ERROR("Failed to add hpd irq id!\n");
3246 		return r;
3247 	}
3248 
3249 	register_hpd_handlers(adev);
3250 
3251 	return 0;
3252 }
3253 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3254 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3255 {
3256 	struct dc *dc = adev->dm.dc;
3257 	struct common_irq_params *c_irq_params;
3258 	struct dc_interrupt_params int_params = {0};
3259 	int r, i;
3260 
3261 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3262 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3263 
3264 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3265 			&adev->dmub_outbox_irq);
3266 	if (r) {
3267 		DRM_ERROR("Failed to add outbox irq id!\n");
3268 		return r;
3269 	}
3270 
3271 	if (dc->ctx->dmub_srv) {
3272 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3273 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3274 		int_params.irq_source =
3275 		dc_interrupt_to_irq_source(dc, i, 0);
3276 
3277 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3278 
3279 		c_irq_params->adev = adev;
3280 		c_irq_params->irq_src = int_params.irq_source;
3281 
3282 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3283 				dm_dmub_outbox1_low_irq, c_irq_params);
3284 	}
3285 
3286 	return 0;
3287 }
3288 #endif
3289 
3290 /*
3291  * Acquires the lock for the atomic state object and returns
3292  * the new atomic state.
3293  *
3294  * This should only be called during atomic check.
3295  */
3296 static int dm_atomic_get_state(struct drm_atomic_state *state,
3297 			       struct dm_atomic_state **dm_state)
3298 {
3299 	struct drm_device *dev = state->dev;
3300 	struct amdgpu_device *adev = drm_to_adev(dev);
3301 	struct amdgpu_display_manager *dm = &adev->dm;
3302 	struct drm_private_state *priv_state;
3303 
3304 	if (*dm_state)
3305 		return 0;
3306 
3307 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3308 	if (IS_ERR(priv_state))
3309 		return PTR_ERR(priv_state);
3310 
3311 	*dm_state = to_dm_atomic_state(priv_state);
3312 
3313 	return 0;
3314 }
3315 
3316 static struct dm_atomic_state *
3317 dm_atomic_get_new_state(struct drm_atomic_state *state)
3318 {
3319 	struct drm_device *dev = state->dev;
3320 	struct amdgpu_device *adev = drm_to_adev(dev);
3321 	struct amdgpu_display_manager *dm = &adev->dm;
3322 	struct drm_private_obj *obj;
3323 	struct drm_private_state *new_obj_state;
3324 	int i;
3325 
3326 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3327 		if (obj->funcs == dm->atomic_obj.funcs)
3328 			return to_dm_atomic_state(new_obj_state);
3329 	}
3330 
3331 	return NULL;
3332 }
3333 
3334 static struct drm_private_state *
3335 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3336 {
3337 	struct dm_atomic_state *old_state, *new_state;
3338 
3339 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3340 	if (!new_state)
3341 		return NULL;
3342 
3343 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3344 
3345 	old_state = to_dm_atomic_state(obj->state);
3346 
3347 	if (old_state && old_state->context)
3348 		new_state->context = dc_copy_state(old_state->context);
3349 
3350 	if (!new_state->context) {
3351 		kfree(new_state);
3352 		return NULL;
3353 	}
3354 
3355 	return &new_state->base;
3356 }
3357 
3358 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3359 				    struct drm_private_state *state)
3360 {
3361 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3362 
3363 	if (dm_state && dm_state->context)
3364 		dc_release_state(dm_state->context);
3365 
3366 	kfree(dm_state);
3367 }
3368 
3369 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3370 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3371 	.atomic_destroy_state = dm_atomic_destroy_state,
3372 };
3373 
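/* Set up the DRM mode config limits and hooks for the device and create the
 * private DM atomic state object that carries the global DC state across
 * atomic commits.
 */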
3374 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3375 {
3376 	struct dm_atomic_state *state;
3377 	int r;
3378 
3379 	adev->mode_info.mode_config_initialized = true;
3380 
3381 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3382 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3383 
3384 	adev_to_drm(adev)->mode_config.max_width = 16384;
3385 	adev_to_drm(adev)->mode_config.max_height = 16384;
3386 
3387 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3388 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3389 	/* indicates support for immediate flip */
3390 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3391 
3392 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3393 
3394 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3395 	if (!state)
3396 		return -ENOMEM;
3397 
3398 	state->context = dc_create_state(adev->dm.dc);
3399 	if (!state->context) {
3400 		kfree(state);
3401 		return -ENOMEM;
3402 	}
3403 
3404 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3405 
3406 	drm_atomic_private_obj_init(adev_to_drm(adev),
3407 				    &adev->dm.atomic_obj,
3408 				    &state->base,
3409 				    &dm_atomic_state_funcs);
3410 
3411 	r = amdgpu_display_modeset_create_props(adev);
3412 	if (r) {
3413 		dc_release_state(state->context);
3414 		kfree(state);
3415 		return r;
3416 	}
3417 
3418 	r = amdgpu_dm_audio_init(adev);
3419 	if (r) {
3420 		dc_release_state(state->context);
3421 		kfree(state);
3422 		return r;
3423 	}
3424 
3425 	return 0;
3426 }
3427 
3428 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3429 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3430 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3431 
3432 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3433 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3434 
3435 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3436 {
3437 #if defined(CONFIG_ACPI)
3438 	struct amdgpu_dm_backlight_caps caps;
3439 
3440 	memset(&caps, 0, sizeof(caps));
3441 
3442 	if (dm->backlight_caps.caps_valid)
3443 		return;
3444 
3445 	amdgpu_acpi_get_backlight_caps(&caps);
3446 	if (caps.caps_valid) {
3447 		dm->backlight_caps.caps_valid = true;
3448 		if (caps.aux_support)
3449 			return;
3450 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3451 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3452 	} else {
3453 		dm->backlight_caps.min_input_signal =
3454 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3455 		dm->backlight_caps.max_input_signal =
3456 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3457 	}
3458 #else
3459 	if (dm->backlight_caps.aux_support)
3460 		return;
3461 
3462 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3463 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3464 #endif
3465 }
3466 
3467 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3468 				unsigned *min, unsigned *max)
3469 {
3470 	if (!caps)
3471 		return 0;
3472 
3473 	if (caps->aux_support) {
3474 		// Firmware limits are in nits, DC API wants millinits.
3475 		*max = 1000 * caps->aux_max_input_signal;
3476 		*min = 1000 * caps->aux_min_input_signal;
3477 	} else {
3478 		// Firmware limits are 8-bit, PWM control is 16-bit.
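		// e.g. a firmware max of 255 scales to 255 * 0x101 = 0xFFFF.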
3479 		*max = 0x101 * caps->max_input_signal;
3480 		*min = 0x101 * caps->min_input_signal;
3481 	}
3482 	return 1;
3483 }
3484 
3485 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3486 					uint32_t brightness)
3487 {
3488 	unsigned min, max;
3489 
3490 	if (!get_brightness_range(caps, &min, &max))
3491 		return brightness;
3492 
3493 	// Rescale 0..255 to min..max
3494 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3495 				       AMDGPU_MAX_BL_LEVEL);
3496 }
3497 
3498 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3499 				      uint32_t brightness)
3500 {
3501 	unsigned min, max;
3502 
3503 	if (!get_brightness_range(caps, &min, &max))
3504 		return brightness;
3505 
3506 	if (brightness < min)
3507 		return 0;
3508 	// Rescale min..max to 0..255
3509 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3510 				 max - min);
3511 }
3512 
3513 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3514 					 u32 user_brightness)
3515 {
3516 	struct amdgpu_dm_backlight_caps caps;
3517 	struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3518 	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3519 	bool rc;
3520 	int i;
3521 
3522 	amdgpu_dm_update_backlight_caps(dm);
3523 	caps = dm->backlight_caps;
3524 
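	/*
	 * Cache the requested level per eDP so that
	 * amdgpu_dm_backlight_get_level() can fall back to it when a
	 * hardware readback fails.
	 */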
3525 	for (i = 0; i < dm->num_of_edps; i++) {
3526 		dm->brightness[i] = user_brightness;
3527 		brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3528 		link[i] = (struct dc_link *)dm->backlight_link[i];
3529 	}
3530 
3531 	/* Change brightness based on AUX property */
3532 	if (caps.aux_support) {
3533 		for (i = 0; i < dm->num_of_edps; i++) {
3534 			rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3535 				AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3536 			if (!rc) {
3537 				DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3538 				break;
3539 			}
3540 		}
3541 	} else {
3542 		for (i = 0; i < dm->num_of_edps; i++) {
3543 			rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3544 			if (!rc) {
3545 				DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
3546 				break;
3547 			}
3548 		}
3549 	}
3550 
3551 	return rc ? 0 : 1;
3552 }
3553 
3554 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3555 {
3556 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3557 
3558 	amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3559 
3560 	return 0;
3561 }
3562 
3563 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3564 {
3565 	struct amdgpu_dm_backlight_caps caps;
3566 
3567 	amdgpu_dm_update_backlight_caps(dm);
3568 	caps = dm->backlight_caps;
3569 
3570 	if (caps.aux_support) {
3571 		struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3572 		u32 avg, peak;
3573 		bool rc;
3574 
3575 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3576 		if (!rc)
3577 			return dm->brightness[0];
3578 		return convert_brightness_to_user(&caps, avg);
3579 	} else {
3580 		int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3581 
3582 		if (ret == DC_ERROR_UNEXPECTED)
3583 			return dm->brightness[0];
3584 		return convert_brightness_to_user(&caps, ret);
3585 	}
3586 }
3587 
3588 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3589 {
3590 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3591 
3592 	return amdgpu_dm_backlight_get_level(dm);
3593 }
3594 
3595 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3596 	.options = BL_CORE_SUSPENDRESUME,
3597 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3598 	.update_status	= amdgpu_dm_backlight_update_status,
3599 };
3600 
3601 static void
3602 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3603 {
3604 	char bl_name[16];
3605 	struct backlight_properties props = { 0 };
3606 	int i;
3607 
3608 	amdgpu_dm_update_backlight_caps(dm);
3609 	for (i = 0; i < dm->num_of_edps; i++)
3610 		dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3611 
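	/*
	 * Userspace sees a 0..AMDGPU_MAX_BL_LEVEL (255) range; conversion to
	 * firmware/PWM units happens in amdgpu_dm_backlight_set_level().
	 */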
3612 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3613 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3614 	props.type = BACKLIGHT_RAW;
3615 
3616 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3617 		 adev_to_drm(dm->adev)->primary->index);
3618 
3619 	dm->backlight_dev = backlight_device_register(bl_name,
3620 						      adev_to_drm(dm->adev)->dev,
3621 						      dm,
3622 						      &amdgpu_dm_backlight_ops,
3623 						      &props);
3624 
3625 	if (IS_ERR(dm->backlight_dev))
3626 		DRM_ERROR("DM: Backlight registration failed!\n");
3627 	else
3628 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3629 }
3630 
3631 #endif
3632 
3633 static int initialize_plane(struct amdgpu_display_manager *dm,
3634 			    struct amdgpu_mode_info *mode_info, int plane_id,
3635 			    enum drm_plane_type plane_type,
3636 			    const struct dc_plane_cap *plane_cap)
3637 {
3638 	struct drm_plane *plane;
3639 	unsigned long possible_crtcs;
3640 	int ret = 0;
3641 
3642 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3643 	if (!plane) {
3644 		DRM_ERROR("KMS: Failed to allocate plane\n");
3645 		return -ENOMEM;
3646 	}
3647 	plane->type = plane_type;
3648 
3649 	/*
3650 	 * HACK: IGT tests expect that the primary plane for a CRTC
3651 	 * can only have one possible CRTC. Only expose support for
3652 	 * any CRTC to planes that will not be used as a CRTC's primary
3653 	 * plane, such as overlay or underlay planes.
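	 *
	 * possible_crtcs is a bitmask of CRTC indices, so 0xff exposes the
	 * plane to any of the first eight CRTCs.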
3654 	 */
3655 	possible_crtcs = 1 << plane_id;
3656 	if (plane_id >= dm->dc->caps.max_streams)
3657 		possible_crtcs = 0xff;
3658 
3659 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3660 
3661 	if (ret) {
3662 		DRM_ERROR("KMS: Failed to initialize plane\n");
3663 		kfree(plane);
3664 		return ret;
3665 	}
3666 
3667 	if (mode_info)
3668 		mode_info->planes[plane_id] = plane;
3669 
3670 	return ret;
3671 }
3672 
3673 
3674 static void register_backlight_device(struct amdgpu_display_manager *dm,
3675 				      struct dc_link *link)
3676 {
3677 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3678 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3679 
3680 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3681 	    link->type != dc_connection_none) {
3682 		/*
3683 		 * Even if registration fails, we should continue with
3684 		 * DM initialization because not having a backlight control
3685 		 * is better than a black screen.
3686 		 */
3687 		if (!dm->backlight_dev)
3688 			amdgpu_dm_register_backlight_device(dm);
3689 
3690 		if (dm->backlight_dev) {
3691 			dm->backlight_link[dm->num_of_edps] = link;
3692 			dm->num_of_edps++;
3693 		}
3694 	}
3695 #endif
3696 }
3697 
3698 
3699 /*
3700  * In this architecture, the association
3701  * connector -> encoder -> crtc
3702  * is not really required. The crtc and connector will hold the
3703  * display_index as an abstraction to use with the DAL component.
3704  *
3705  * Returns 0 on success
3706  */
3707 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3708 {
3709 	struct amdgpu_display_manager *dm = &adev->dm;
3710 	int32_t i;
3711 	struct amdgpu_dm_connector *aconnector = NULL;
3712 	struct amdgpu_encoder *aencoder = NULL;
3713 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3714 	uint32_t link_cnt;
3715 	int32_t primary_planes;
3716 	enum dc_connection_type new_connection_type = dc_connection_none;
3717 	const struct dc_plane_cap *plane;
3718 
3719 	dm->display_indexes_num = dm->dc->caps.max_streams;
3720 	/* Update the actual number of CRTCs used */
3721 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3722 
3723 	link_cnt = dm->dc->caps.max_links;
3724 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3725 		DRM_ERROR("DM: Failed to initialize mode config\n");
3726 		return -EINVAL;
3727 	}
3728 
3729 	/* There is one primary plane per CRTC */
3730 	primary_planes = dm->dc->caps.max_streams;
3731 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3732 
3733 	/*
3734 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3735 	 * Order is reversed to match iteration order in atomic check.
3736 	 */
3737 	for (i = (primary_planes - 1); i >= 0; i--) {
3738 		plane = &dm->dc->caps.planes[i];
3739 
3740 		if (initialize_plane(dm, mode_info, i,
3741 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3742 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3743 			goto fail;
3744 		}
3745 	}
3746 
3747 	/*
3748 	 * Initialize overlay planes, index starting after primary planes.
3749 	 * These planes have a higher DRM index than the primary planes since
3750 	 * they should be considered as having a higher z-order.
3751 	 * Order is reversed to match iteration order in atomic check.
3752 	 *
3753 	 * Only support DCN for now, and only expose one so we don't encourage
3754 	 * userspace to use up all the pipes.
3755 	 */
3756 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3757 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3758 
3759 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3760 			continue;
3761 
3762 		if (!plane->blends_with_above || !plane->blends_with_below)
3763 			continue;
3764 
3765 		if (!plane->pixel_format_support.argb8888)
3766 			continue;
3767 
3768 		if (initialize_plane(dm, NULL, primary_planes + i,
3769 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3770 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3771 			goto fail;
3772 		}
3773 
3774 		/* Only create one overlay plane. */
3775 		break;
3776 	}
3777 
3778 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3779 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3780 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3781 			goto fail;
3782 		}
3783 
3784 #if defined(CONFIG_DRM_AMD_DC_DCN)
3785 	/* Use Outbox interrupt */
3786 	switch (adev->asic_type) {
3787 	case CHIP_SIENNA_CICHLID:
3788 	case CHIP_NAVY_FLOUNDER:
3789 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
3790 	case CHIP_YELLOW_CARP:
3791 #endif
3792 	case CHIP_RENOIR:
3793 		if (register_outbox_irq_handlers(dm->adev)) {
3794 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3795 			goto fail;
3796 		}
3797 		break;
3798 	default:
3799 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3800 	}
3801 #endif
3802 
3803 	/* loops over all connectors on the board */
3804 	for (i = 0; i < link_cnt; i++) {
3805 		struct dc_link *link = NULL;
3806 
3807 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3808 			DRM_ERROR(
3809 				"KMS: Cannot support more than %d display indexes\n",
3810 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3811 			continue;
3812 		}
3813 
3814 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3815 		if (!aconnector)
3816 			goto fail;
3817 
3818 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3819 		if (!aencoder)
3820 			goto fail;
3821 
3822 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3823 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3824 			goto fail;
3825 		}
3826 
3827 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3828 			DRM_ERROR("KMS: Failed to initialize connector\n");
3829 			goto fail;
3830 		}
3831 
3832 		link = dc_get_link_at_index(dm->dc, i);
3833 
3834 		if (!dc_link_detect_sink(link, &new_connection_type))
3835 			DRM_ERROR("KMS: Failed to detect connector\n");
3836 
3837 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3838 			emulated_link_detect(link);
3839 			amdgpu_dm_update_connector_after_detect(aconnector);
3840 
3841 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3842 			amdgpu_dm_update_connector_after_detect(aconnector);
3843 			register_backlight_device(dm, link);
3844 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3845 				amdgpu_dm_set_psr_caps(link);
3846 		}
3847 
3848 
3849 	}
3850 
3851 	/* Software is initialized. Now we can register interrupt handlers. */
3852 	switch (adev->asic_type) {
3853 #if defined(CONFIG_DRM_AMD_DC_SI)
3854 	case CHIP_TAHITI:
3855 	case CHIP_PITCAIRN:
3856 	case CHIP_VERDE:
3857 	case CHIP_OLAND:
3858 		if (dce60_register_irq_handlers(dm->adev)) {
3859 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3860 			goto fail;
3861 		}
3862 		break;
3863 #endif
3864 	case CHIP_BONAIRE:
3865 	case CHIP_HAWAII:
3866 	case CHIP_KAVERI:
3867 	case CHIP_KABINI:
3868 	case CHIP_MULLINS:
3869 	case CHIP_TONGA:
3870 	case CHIP_FIJI:
3871 	case CHIP_CARRIZO:
3872 	case CHIP_STONEY:
3873 	case CHIP_POLARIS11:
3874 	case CHIP_POLARIS10:
3875 	case CHIP_POLARIS12:
3876 	case CHIP_VEGAM:
3877 	case CHIP_VEGA10:
3878 	case CHIP_VEGA12:
3879 	case CHIP_VEGA20:
3880 		if (dce110_register_irq_handlers(dm->adev)) {
3881 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3882 			goto fail;
3883 		}
3884 		break;
3885 #if defined(CONFIG_DRM_AMD_DC_DCN)
3886 	case CHIP_RAVEN:
3887 	case CHIP_NAVI12:
3888 	case CHIP_NAVI10:
3889 	case CHIP_NAVI14:
3890 	case CHIP_RENOIR:
3891 	case CHIP_SIENNA_CICHLID:
3892 	case CHIP_NAVY_FLOUNDER:
3893 	case CHIP_DIMGREY_CAVEFISH:
3894 	case CHIP_BEIGE_GOBY:
3895 	case CHIP_VANGOGH:
3896 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
3897 	case CHIP_YELLOW_CARP:
3898 #endif
3899 		if (dcn10_register_irq_handlers(dm->adev)) {
3900 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3901 			goto fail;
3902 		}
3903 		break;
3904 #endif
3905 	default:
3906 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3907 		goto fail;
3908 	}
3909 
3910 	return 0;
3911 fail:
3912 	kfree(aencoder);
3913 	kfree(aconnector);
3914 
3915 	return -EINVAL;
3916 }
3917 
3918 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3919 {
3920 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3922 }
3923 
3924 /******************************************************************************
3925  * amdgpu_display_funcs functions
3926  *****************************************************************************/
3927 
3928 /*
3929  * dm_bandwidth_update - program display watermarks
3930  *
3931  * @adev: amdgpu_device pointer
3932  *
3933  * Calculate and program the display watermarks and line buffer allocation.
3934  */
3935 static void dm_bandwidth_update(struct amdgpu_device *adev)
3936 {
3937 	/* TODO: implement later */
3938 }
3939 
3940 static const struct amdgpu_display_funcs dm_display_funcs = {
3941 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3942 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3943 	.backlight_set_level = NULL, /* never called for DC */
3944 	.backlight_get_level = NULL, /* never called for DC */
3945 	.hpd_sense = NULL,/* called unconditionally */
3946 	.hpd_set_polarity = NULL, /* called unconditionally */
3947 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3948 	.page_flip_get_scanoutpos =
3949 		dm_crtc_get_scanoutpos,/* called unconditionally */
3950 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3951 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3952 };
3953 
3954 #if defined(CONFIG_DEBUG_KERNEL_DC)
3955 
3956 static ssize_t s3_debug_store(struct device *device,
3957 			      struct device_attribute *attr,
3958 			      const char *buf,
3959 			      size_t count)
3960 {
3961 	int ret;
3962 	int s3_state;
3963 	struct drm_device *drm_dev = dev_get_drvdata(device);
3964 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3965 
3966 	ret = kstrtoint(buf, 0, &s3_state);
3967 
3968 	if (ret == 0) {
3969 		if (s3_state) {
3970 			dm_resume(adev);
3971 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3972 		} else
3973 			dm_suspend(adev);
3974 	}
3975 
3976 	return ret == 0 ? count : 0;
3977 }
3978 
3979 DEVICE_ATTR_WO(s3_debug);
3980 
3981 #endif
3982 
3983 static int dm_early_init(void *handle)
3984 {
3985 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3986 
3987 	switch (adev->asic_type) {
3988 #if defined(CONFIG_DRM_AMD_DC_SI)
3989 	case CHIP_TAHITI:
3990 	case CHIP_PITCAIRN:
3991 	case CHIP_VERDE:
3992 		adev->mode_info.num_crtc = 6;
3993 		adev->mode_info.num_hpd = 6;
3994 		adev->mode_info.num_dig = 6;
3995 		break;
3996 	case CHIP_OLAND:
3997 		adev->mode_info.num_crtc = 2;
3998 		adev->mode_info.num_hpd = 2;
3999 		adev->mode_info.num_dig = 2;
4000 		break;
4001 #endif
4002 	case CHIP_BONAIRE:
4003 	case CHIP_HAWAII:
4004 		adev->mode_info.num_crtc = 6;
4005 		adev->mode_info.num_hpd = 6;
4006 		adev->mode_info.num_dig = 6;
4007 		break;
4008 	case CHIP_KAVERI:
4009 		adev->mode_info.num_crtc = 4;
4010 		adev->mode_info.num_hpd = 6;
4011 		adev->mode_info.num_dig = 7;
4012 		break;
4013 	case CHIP_KABINI:
4014 	case CHIP_MULLINS:
4015 		adev->mode_info.num_crtc = 2;
4016 		adev->mode_info.num_hpd = 6;
4017 		adev->mode_info.num_dig = 6;
4018 		break;
4019 	case CHIP_FIJI:
4020 	case CHIP_TONGA:
4021 		adev->mode_info.num_crtc = 6;
4022 		adev->mode_info.num_hpd = 6;
4023 		adev->mode_info.num_dig = 7;
4024 		break;
4025 	case CHIP_CARRIZO:
4026 		adev->mode_info.num_crtc = 3;
4027 		adev->mode_info.num_hpd = 6;
4028 		adev->mode_info.num_dig = 9;
4029 		break;
4030 	case CHIP_STONEY:
4031 		adev->mode_info.num_crtc = 2;
4032 		adev->mode_info.num_hpd = 6;
4033 		adev->mode_info.num_dig = 9;
4034 		break;
4035 	case CHIP_POLARIS11:
4036 	case CHIP_POLARIS12:
4037 		adev->mode_info.num_crtc = 5;
4038 		adev->mode_info.num_hpd = 5;
4039 		adev->mode_info.num_dig = 5;
4040 		break;
4041 	case CHIP_POLARIS10:
4042 	case CHIP_VEGAM:
4043 		adev->mode_info.num_crtc = 6;
4044 		adev->mode_info.num_hpd = 6;
4045 		adev->mode_info.num_dig = 6;
4046 		break;
4047 	case CHIP_VEGA10:
4048 	case CHIP_VEGA12:
4049 	case CHIP_VEGA20:
4050 		adev->mode_info.num_crtc = 6;
4051 		adev->mode_info.num_hpd = 6;
4052 		adev->mode_info.num_dig = 6;
4053 		break;
4054 #if defined(CONFIG_DRM_AMD_DC_DCN)
4055 	case CHIP_RAVEN:
4056 	case CHIP_RENOIR:
4057 	case CHIP_VANGOGH:
4058 		adev->mode_info.num_crtc = 4;
4059 		adev->mode_info.num_hpd = 4;
4060 		adev->mode_info.num_dig = 4;
4061 		break;
4062 	case CHIP_NAVI10:
4063 	case CHIP_NAVI12:
4064 	case CHIP_SIENNA_CICHLID:
4065 	case CHIP_NAVY_FLOUNDER:
4066 		adev->mode_info.num_crtc = 6;
4067 		adev->mode_info.num_hpd = 6;
4068 		adev->mode_info.num_dig = 6;
4069 		break;
4070 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
4071 	case CHIP_YELLOW_CARP:
4072 		adev->mode_info.num_crtc = 4;
4073 		adev->mode_info.num_hpd = 4;
4074 		adev->mode_info.num_dig = 4;
4075 		break;
4076 #endif
4077 	case CHIP_NAVI14:
4078 	case CHIP_DIMGREY_CAVEFISH:
4079 		adev->mode_info.num_crtc = 5;
4080 		adev->mode_info.num_hpd = 5;
4081 		adev->mode_info.num_dig = 5;
4082 		break;
4083 	case CHIP_BEIGE_GOBY:
4084 		adev->mode_info.num_crtc = 2;
4085 		adev->mode_info.num_hpd = 2;
4086 		adev->mode_info.num_dig = 2;
4087 		break;
4088 #endif
4089 	default:
4090 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4091 		return -EINVAL;
4092 	}
4093 
4094 	amdgpu_dm_set_irq_funcs(adev);
4095 
4096 	if (adev->mode_info.funcs == NULL)
4097 		adev->mode_info.funcs = &dm_display_funcs;
4098 
4099 	/*
4100 	 * Note: Do NOT change adev->audio_endpt_rreg and
4101 	 * adev->audio_endpt_wreg because they are initialised in
4102 	 * amdgpu_device_init()
4103 	 */
4104 #if defined(CONFIG_DEBUG_KERNEL_DC)
4105 	device_create_file(
4106 		adev_to_drm(adev)->dev,
4107 		&dev_attr_s3_debug);
4108 #endif
4109 
4110 	return 0;
4111 }
4112 
4113 static bool modeset_required(struct drm_crtc_state *crtc_state,
4114 			     struct dc_stream_state *new_stream,
4115 			     struct dc_stream_state *old_stream)
4116 {
4117 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4118 }
4119 
4120 static bool modereset_required(struct drm_crtc_state *crtc_state)
4121 {
4122 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4123 }
4124 
4125 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4126 {
4127 	drm_encoder_cleanup(encoder);
4128 	kfree(encoder);
4129 }
4130 
4131 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4132 	.destroy = amdgpu_dm_encoder_destroy,
4133 };
4134 
4135 
4136 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4137 					 struct drm_framebuffer *fb,
4138 					 int *min_downscale, int *max_upscale)
4139 {
4140 	struct amdgpu_device *adev = drm_to_adev(dev);
4141 	struct dc *dc = adev->dm.dc;
4142 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4143 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4144 
4145 	switch (fb->format->format) {
4146 	case DRM_FORMAT_P010:
4147 	case DRM_FORMAT_NV12:
4148 	case DRM_FORMAT_NV21:
4149 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4150 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4151 		break;
4152 
4153 	case DRM_FORMAT_XRGB16161616F:
4154 	case DRM_FORMAT_ARGB16161616F:
4155 	case DRM_FORMAT_XBGR16161616F:
4156 	case DRM_FORMAT_ABGR16161616F:
4157 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4158 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4159 		break;
4160 
4161 	default:
4162 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4163 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4164 		break;
4165 	}
4166 
4167 	/*
4168 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use
4169 	 * a scaling factor of 1.0 == 1000 units.
4170 	 */
4171 	if (*max_upscale == 1)
4172 		*max_upscale = 1000;
4173 
4174 	if (*min_downscale == 1)
4175 		*min_downscale = 1000;
4176 }
4177 
4178 
4179 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4180 				struct dc_scaling_info *scaling_info)
4181 {
4182 	int scale_w, scale_h, min_downscale, max_upscale;
4183 
4184 	memset(scaling_info, 0, sizeof(*scaling_info));
4185 
4186 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4187 	scaling_info->src_rect.x = state->src_x >> 16;
4188 	scaling_info->src_rect.y = state->src_y >> 16;
4189 
4190 	/*
4191 	 * For reasons we don't (yet) fully understand, a non-zero
4192 	 * src_y coordinate into an NV12 buffer can cause a
4193 	 * system hang. To avoid hangs (and maybe be overly cautious)
4194 	 * let's reject both non-zero src_x and src_y.
4195 	 *
4196 	 * We currently know of only one use-case to reproduce a
4197 	 * scenario with non-zero src_x and src_y for NV12, which
4198 	 * is to gesture the YouTube Android app into full screen
4199 	 * on ChromeOS.
4200 	 */
4201 	if (state->fb &&
4202 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4203 	    (scaling_info->src_rect.x != 0 ||
4204 	     scaling_info->src_rect.y != 0))
4205 		return -EINVAL;
4206 
4207 	scaling_info->src_rect.width = state->src_w >> 16;
4208 	if (scaling_info->src_rect.width == 0)
4209 		return -EINVAL;
4210 
4211 	scaling_info->src_rect.height = state->src_h >> 16;
4212 	if (scaling_info->src_rect.height == 0)
4213 		return -EINVAL;
4214 
4215 	scaling_info->dst_rect.x = state->crtc_x;
4216 	scaling_info->dst_rect.y = state->crtc_y;
4217 
4218 	if (state->crtc_w == 0)
4219 		return -EINVAL;
4220 
4221 	scaling_info->dst_rect.width = state->crtc_w;
4222 
4223 	if (state->crtc_h == 0)
4224 		return -EINVAL;
4225 
4226 	scaling_info->dst_rect.height = state->crtc_h;
4227 
4228 	/* DRM doesn't specify clipping on destination output. */
4229 	scaling_info->clip_rect = scaling_info->dst_rect;
4230 
4231 	/* Validate scaling per-format with DC plane caps */
4232 	if (state->plane && state->plane->dev && state->fb) {
4233 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4234 					     &min_downscale, &max_upscale);
4235 	} else {
4236 		min_downscale = 250;
4237 		max_upscale = 16000;
4238 	}
4239 
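	/*
	 * Scaling ratios are expressed in units of 0.001, so the fallback
	 * limits of 250 and 16000 above correspond to 0.25x and 16x.
	 */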
4240 	scale_w = scaling_info->dst_rect.width * 1000 /
4241 		  scaling_info->src_rect.width;
4242 
4243 	if (scale_w < min_downscale || scale_w > max_upscale)
4244 		return -EINVAL;
4245 
4246 	scale_h = scaling_info->dst_rect.height * 1000 /
4247 		  scaling_info->src_rect.height;
4248 
4249 	if (scale_h < min_downscale || scale_h > max_upscale)
4250 		return -EINVAL;
4251 
4252 	/*
4253 	 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
4254 	 * assume reasonable defaults based on the format.
4255 	 */
4256 
4257 	return 0;
4258 }
4259 
4260 static void
4261 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4262 				 uint64_t tiling_flags)
4263 {
4264 	/* Fill GFX8 params */
4265 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4266 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4267 
4268 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4269 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4270 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4271 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4272 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4273 
4274 		/* XXX fix me for VI */
4275 		tiling_info->gfx8.num_banks = num_banks;
4276 		tiling_info->gfx8.array_mode =
4277 				DC_ARRAY_2D_TILED_THIN1;
4278 		tiling_info->gfx8.tile_split = tile_split;
4279 		tiling_info->gfx8.bank_width = bankw;
4280 		tiling_info->gfx8.bank_height = bankh;
4281 		tiling_info->gfx8.tile_aspect = mtaspect;
4282 		tiling_info->gfx8.tile_mode =
4283 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4284 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4285 			== DC_ARRAY_1D_TILED_THIN1) {
4286 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4287 	}
4288 
4289 	tiling_info->gfx8.pipe_config =
4290 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4291 }
4292 
4293 static void
4294 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4295 				  union dc_tiling_info *tiling_info)
4296 {
4297 	tiling_info->gfx9.num_pipes =
4298 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4299 	tiling_info->gfx9.num_banks =
4300 		adev->gfx.config.gb_addr_config_fields.num_banks;
4301 	tiling_info->gfx9.pipe_interleave =
4302 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4303 	tiling_info->gfx9.num_shader_engines =
4304 		adev->gfx.config.gb_addr_config_fields.num_se;
4305 	tiling_info->gfx9.max_compressed_frags =
4306 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4307 	tiling_info->gfx9.num_rb_per_se =
4308 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4309 	tiling_info->gfx9.shaderEnable = 1;
4310 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4311 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4312 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4313 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4314 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
4315 	    adev->asic_type == CHIP_YELLOW_CARP ||
4316 #endif
4317 	    adev->asic_type == CHIP_VANGOGH)
4318 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4319 }
4320 
4321 static int
4322 validate_dcc(struct amdgpu_device *adev,
4323 	     const enum surface_pixel_format format,
4324 	     const enum dc_rotation_angle rotation,
4325 	     const union dc_tiling_info *tiling_info,
4326 	     const struct dc_plane_dcc_param *dcc,
4327 	     const struct dc_plane_address *address,
4328 	     const struct plane_size *plane_size)
4329 {
4330 	struct dc *dc = adev->dm.dc;
4331 	struct dc_dcc_surface_param input;
4332 	struct dc_surface_dcc_cap output;
4333 
4334 	memset(&input, 0, sizeof(input));
4335 	memset(&output, 0, sizeof(output));
4336 
4337 	if (!dcc->enable)
4338 		return 0;
4339 
4340 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4341 	    !dc->cap_funcs.get_dcc_compression_cap)
4342 		return -EINVAL;
4343 
4344 	input.format = format;
4345 	input.surface_size.width = plane_size->surface_size.width;
4346 	input.surface_size.height = plane_size->surface_size.height;
4347 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4348 
4349 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4350 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4351 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4352 		input.scan = SCAN_DIRECTION_VERTICAL;
4353 
4354 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4355 		return -EINVAL;
4356 
4357 	if (!output.capable)
4358 		return -EINVAL;
4359 
4360 	if (dcc->independent_64b_blks == 0 &&
4361 	    output.grph.rgb.independent_64b_blks != 0)
4362 		return -EINVAL;
4363 
4364 	return 0;
4365 }
4366 
4367 static bool
4368 modifier_has_dcc(uint64_t modifier)
4369 {
4370 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4371 }
4372 
4373 static unsigned
4374 modifier_gfx9_swizzle_mode(uint64_t modifier)
4375 {
4376 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4377 		return 0;
4378 
4379 	return AMD_FMT_MOD_GET(TILE, modifier);
4380 }
4381 
4382 static const struct drm_format_info *
4383 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4384 {
4385 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4386 }
4387 
4388 static void
4389 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4390 				    union dc_tiling_info *tiling_info,
4391 				    uint64_t modifier)
4392 {
4393 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4394 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4395 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4396 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4397 
4398 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4399 
4400 	if (!IS_AMD_FMT_MOD(modifier))
4401 		return;
4402 
4403 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4404 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4405 
4406 	if (adev->family >= AMDGPU_FAMILY_NV) {
4407 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4408 	} else {
4409 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4410 
4411 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4412 	}
4413 }
4414 
4415 enum dm_micro_swizzle {
4416 	MICRO_SWIZZLE_Z = 0,
4417 	MICRO_SWIZZLE_S = 1,
4418 	MICRO_SWIZZLE_D = 2,
4419 	MICRO_SWIZZLE_R = 3
4420 };
4421 
4422 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4423 					  uint32_t format,
4424 					  uint64_t modifier)
4425 {
4426 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4427 	const struct drm_format_info *info = drm_format_info(format);
4428 	int i;
4429 
4430 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4431 
4432 	if (!info)
4433 		return false;
4434 
4435 	/*
4436 	 * We always have to allow these modifiers:
4437 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4438 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4439 	 */
4440 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4441 	    modifier == DRM_FORMAT_MOD_INVALID) {
4442 		return true;
4443 	}
4444 
4445 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4446 	for (i = 0; i < plane->modifier_count; i++) {
4447 		if (modifier == plane->modifiers[i])
4448 			break;
4449 	}
4450 	if (i == plane->modifier_count)
4451 		return false;
4452 
4453 	/*
4454 	 * For D swizzle the canonical modifier depends on the bpp, so check
4455 	 * it here.
4456 	 */
4457 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4458 	    adev->family >= AMDGPU_FAMILY_NV) {
4459 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4460 			return false;
4461 	}
4462 
4463 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4464 	    info->cpp[0] < 8)
4465 		return false;
4466 
4467 	if (modifier_has_dcc(modifier)) {
4468 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4469 		if (info->cpp[0] != 4)
4470 			return false;
4471 		/* We support multi-planar formats, but not when combined with
4472 		 * additional DCC metadata planes. */
4473 		if (info->num_planes > 1)
4474 			return false;
4475 	}
4476 
4477 	return true;
4478 }
4479 
4480 static void
4481 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4482 {
4483 	if (!*mods)
4484 		return;
4485 
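	/*
	 * No room left: double the capacity. If the reallocation fails the
	 * whole list is dropped and *mods is left NULL for the caller.
	 */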
4486 	if (*cap - *size < 1) {
4487 		uint64_t new_cap = *cap * 2;
4488 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4489 
4490 		if (!new_mods) {
4491 			kfree(*mods);
4492 			*mods = NULL;
4493 			return;
4494 		}
4495 
4496 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4497 		kfree(*mods);
4498 		*mods = new_mods;
4499 		*cap = new_cap;
4500 	}
4501 
4502 	(*mods)[*size] = mod;
4503 	*size += 1;
4504 }
4505 
4506 static void
4507 add_gfx9_modifiers(const struct amdgpu_device *adev,
4508 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4509 {
4510 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4511 	int pipe_xor_bits = min(8, pipes +
4512 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4513 	int bank_xor_bits = min(8 - pipe_xor_bits,
4514 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4515 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4516 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4517 
4518 
4519 	if (adev->family == AMDGPU_FAMILY_RV) {
4520 		/* Raven2 and later */
4521 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4522 
4523 		/*
4524 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4525 		 * doesn't support _D on DCN
4526 		 */
4527 
4528 		if (has_constant_encode) {
4529 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4530 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4531 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4532 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4533 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4534 				    AMD_FMT_MOD_SET(DCC, 1) |
4535 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4536 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4537 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4538 		}
4539 
4540 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4541 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4542 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4543 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4544 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4545 			    AMD_FMT_MOD_SET(DCC, 1) |
4546 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4547 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4548 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4549 
4550 		if (has_constant_encode) {
4551 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4552 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4553 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4554 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4555 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4556 				    AMD_FMT_MOD_SET(DCC, 1) |
4557 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4558 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4559 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4560 
4561 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4562 				    AMD_FMT_MOD_SET(RB, rb) |
4563 				    AMD_FMT_MOD_SET(PIPE, pipes));
4564 		}
4565 
4566 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4567 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4568 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4569 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4570 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4571 			    AMD_FMT_MOD_SET(DCC, 1) |
4572 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4573 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4574 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4575 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4576 			    AMD_FMT_MOD_SET(RB, rb) |
4577 			    AMD_FMT_MOD_SET(PIPE, pipes));
4578 	}
4579 
4580 	/*
4581 	 * Only supported for 64bpp on Raven, will be filtered on format in
4582 	 * dm_plane_format_mod_supported.
4583 	 */
4584 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4585 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4586 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4587 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4588 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4589 
4590 	if (adev->family == AMDGPU_FAMILY_RV) {
4591 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4592 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4593 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4594 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4595 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4596 	}
4597 
4598 	/*
4599 	 * Only supported for 64bpp on Raven, will be filtered on format in
4600 	 * dm_plane_format_mod_supported.
4601 	 */
4602 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4603 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4604 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4605 
4606 	if (adev->family == AMDGPU_FAMILY_RV) {
4607 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4608 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4609 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4610 	}
4611 }
4612 
4613 static void
4614 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4615 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4616 {
4617 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4618 
4619 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4620 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4621 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4622 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4623 		    AMD_FMT_MOD_SET(DCC, 1) |
4624 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4625 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4626 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4627 
4628 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4629 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4630 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4631 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4632 		    AMD_FMT_MOD_SET(DCC, 1) |
4633 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4634 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4635 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4636 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4637 
4638 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4639 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4640 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4641 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4642 
4643 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4644 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4645 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4646 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4647 
4648 
4649 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4650 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4651 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4652 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4653 
4654 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4655 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4656 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4657 }
4658 
4659 static void
4660 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4661 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4662 {
4663 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4664 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4665 
4666 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4667 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4668 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4669 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4670 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4671 		    AMD_FMT_MOD_SET(DCC, 1) |
4672 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4673 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4674 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4675 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4676 
4677 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4678 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4679 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4680 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4681 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4682 		    AMD_FMT_MOD_SET(DCC, 1) |
4683 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4684 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4685 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4686 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4687 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4688 
4689 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4690 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4691 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4692 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4693 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4694 
4695 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4696 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4697 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4698 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4699 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4700 
4701 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4702 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4703 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4704 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4705 
4706 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4707 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4708 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4709 }
4710 
4711 static int
4712 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4713 {
4714 	uint64_t size = 0, capacity = 128;
4715 	*mods = NULL;
4716 
4717 	/* We have not hooked up any pre-GFX9 modifiers. */
4718 	if (adev->family < AMDGPU_FAMILY_AI)
4719 		return 0;
4720 
4721 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4722 
4723 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4724 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4725 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4726 		return *mods ? 0 : -ENOMEM;
4727 	}
4728 
4729 	switch (adev->family) {
4730 	case AMDGPU_FAMILY_AI:
4731 	case AMDGPU_FAMILY_RV:
4732 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4733 		break;
4734 	case AMDGPU_FAMILY_NV:
4735 	case AMDGPU_FAMILY_VGH:
4736 	case AMDGPU_FAMILY_YC:
4737 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4738 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4739 		else
4740 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4741 		break;
4742 	}
4743 
4744 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4745 
4746 	/* INVALID marks the end of the list. */
4747 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4748 
4749 	if (!*mods)
4750 		return -ENOMEM;
4751 
4752 	return 0;
4753 }
4754 
4755 static int
4756 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4757 					  const struct amdgpu_framebuffer *afb,
4758 					  const enum surface_pixel_format format,
4759 					  const enum dc_rotation_angle rotation,
4760 					  const struct plane_size *plane_size,
4761 					  union dc_tiling_info *tiling_info,
4762 					  struct dc_plane_dcc_param *dcc,
4763 					  struct dc_plane_address *address,
4764 					  const bool force_disable_dcc)
4765 {
4766 	const uint64_t modifier = afb->base.modifier;
4767 	int ret;
4768 
4769 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4770 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4771 
4772 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4773 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4774 
4775 		dcc->enable = 1;
4776 		dcc->meta_pitch = afb->base.pitches[1];
4777 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4778 
4779 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4780 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4781 	}
4782 
4783 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4784 	if (ret)
4785 		return ret;
4786 
4787 	return 0;
4788 }
4789 
4790 static int
4791 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4792 			     const struct amdgpu_framebuffer *afb,
4793 			     const enum surface_pixel_format format,
4794 			     const enum dc_rotation_angle rotation,
4795 			     const uint64_t tiling_flags,
4796 			     union dc_tiling_info *tiling_info,
4797 			     struct plane_size *plane_size,
4798 			     struct dc_plane_dcc_param *dcc,
4799 			     struct dc_plane_address *address,
4800 			     bool tmz_surface,
4801 			     bool force_disable_dcc)
4802 {
4803 	const struct drm_framebuffer *fb = &afb->base;
4804 	int ret;
4805 
4806 	memset(tiling_info, 0, sizeof(*tiling_info));
4807 	memset(plane_size, 0, sizeof(*plane_size));
4808 	memset(dcc, 0, sizeof(*dcc));
4809 	memset(address, 0, sizeof(*address));
4810 
4811 	address->tmz_surface = tmz_surface;
4812 
4813 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4814 		uint64_t addr = afb->address + fb->offsets[0];
4815 
4816 		plane_size->surface_size.x = 0;
4817 		plane_size->surface_size.y = 0;
4818 		plane_size->surface_size.width = fb->width;
4819 		plane_size->surface_size.height = fb->height;
4820 		plane_size->surface_pitch =
4821 			fb->pitches[0] / fb->format->cpp[0];
4822 
4823 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4824 		address->grph.addr.low_part = lower_32_bits(addr);
4825 		address->grph.addr.high_part = upper_32_bits(addr);
4826 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4827 		uint64_t luma_addr = afb->address + fb->offsets[0];
4828 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4829 
4830 		plane_size->surface_size.x = 0;
4831 		plane_size->surface_size.y = 0;
4832 		plane_size->surface_size.width = fb->width;
4833 		plane_size->surface_size.height = fb->height;
4834 		plane_size->surface_pitch =
4835 			fb->pitches[0] / fb->format->cpp[0];
4836 
4837 		plane_size->chroma_size.x = 0;
4838 		plane_size->chroma_size.y = 0;
4839 		/* TODO: set these based on surface format */
4840 		plane_size->chroma_size.width = fb->width / 2;
4841 		plane_size->chroma_size.height = fb->height / 2;
4842 
4843 		plane_size->chroma_pitch =
4844 			fb->pitches[1] / fb->format->cpp[1];
4845 
4846 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4847 		address->video_progressive.luma_addr.low_part =
4848 			lower_32_bits(luma_addr);
4849 		address->video_progressive.luma_addr.high_part =
4850 			upper_32_bits(luma_addr);
4851 		address->video_progressive.chroma_addr.low_part =
4852 			lower_32_bits(chroma_addr);
4853 		address->video_progressive.chroma_addr.high_part =
4854 			upper_32_bits(chroma_addr);
4855 	}
4856 
4857 	if (adev->family >= AMDGPU_FAMILY_AI) {
4858 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4859 								rotation, plane_size,
4860 								tiling_info, dcc,
4861 								address,
4862 								force_disable_dcc);
4863 		if (ret)
4864 			return ret;
4865 	} else {
4866 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4867 	}
4868 
4869 	return 0;
4870 }
4871 
4872 static void
4873 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4874 			       bool *per_pixel_alpha, bool *global_alpha,
4875 			       int *global_alpha_value)
4876 {
4877 	*per_pixel_alpha = false;
4878 	*global_alpha = false;
4879 	*global_alpha_value = 0xff;
4880 
4881 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4882 		return;
4883 
4884 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4885 		static const uint32_t alpha_formats[] = {
4886 			DRM_FORMAT_ARGB8888,
4887 			DRM_FORMAT_RGBA8888,
4888 			DRM_FORMAT_ABGR8888,
4889 		};
4890 		uint32_t format = plane_state->fb->format->format;
4891 		unsigned int i;
4892 
4893 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4894 			if (format == alpha_formats[i]) {
4895 				*per_pixel_alpha = true;
4896 				break;
4897 			}
4898 		}
4899 	}
4900 
4901 	if (plane_state->alpha < 0xffff) {
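	/*
	 * The DRM plane alpha property is 16-bit (0xffff == opaque); DC takes
	 * an 8-bit global alpha, hence the shift below.
	 */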
4902 		*global_alpha = true;
4903 		*global_alpha_value = plane_state->alpha >> 8;
4904 	}
4905 }
4906 
4907 static int
4908 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4909 			    const enum surface_pixel_format format,
4910 			    enum dc_color_space *color_space)
4911 {
4912 	bool full_range;
4913 
4914 	*color_space = COLOR_SPACE_SRGB;
4915 
4916 	/* DRM color properties only affect non-RGB formats. */
4917 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4918 		return 0;
4919 
4920 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4921 
4922 	switch (plane_state->color_encoding) {
4923 	case DRM_COLOR_YCBCR_BT601:
4924 		if (full_range)
4925 			*color_space = COLOR_SPACE_YCBCR601;
4926 		else
4927 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4928 		break;
4929 
4930 	case DRM_COLOR_YCBCR_BT709:
4931 		if (full_range)
4932 			*color_space = COLOR_SPACE_YCBCR709;
4933 		else
4934 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4935 		break;
4936 
4937 	case DRM_COLOR_YCBCR_BT2020:
4938 		if (full_range)
4939 			*color_space = COLOR_SPACE_2020_YCBCR;
4940 		else
4941 			return -EINVAL;
4942 		break;
4943 
4944 	default:
4945 		return -EINVAL;
4946 	}
4947 
4948 	return 0;
4949 }
4950 
4951 static int
4952 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4953 			    const struct drm_plane_state *plane_state,
4954 			    const uint64_t tiling_flags,
4955 			    struct dc_plane_info *plane_info,
4956 			    struct dc_plane_address *address,
4957 			    bool tmz_surface,
4958 			    bool force_disable_dcc)
4959 {
4960 	const struct drm_framebuffer *fb = plane_state->fb;
4961 	const struct amdgpu_framebuffer *afb =
4962 		to_amdgpu_framebuffer(plane_state->fb);
4963 	int ret;
4964 
4965 	memset(plane_info, 0, sizeof(*plane_info));
4966 
4967 	switch (fb->format->format) {
4968 	case DRM_FORMAT_C8:
4969 		plane_info->format =
4970 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4971 		break;
4972 	case DRM_FORMAT_RGB565:
4973 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4974 		break;
4975 	case DRM_FORMAT_XRGB8888:
4976 	case DRM_FORMAT_ARGB8888:
4977 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4978 		break;
4979 	case DRM_FORMAT_XRGB2101010:
4980 	case DRM_FORMAT_ARGB2101010:
4981 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4982 		break;
4983 	case DRM_FORMAT_XBGR2101010:
4984 	case DRM_FORMAT_ABGR2101010:
4985 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4986 		break;
4987 	case DRM_FORMAT_XBGR8888:
4988 	case DRM_FORMAT_ABGR8888:
4989 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4990 		break;
4991 	case DRM_FORMAT_NV21:
4992 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4993 		break;
4994 	case DRM_FORMAT_NV12:
4995 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4996 		break;
4997 	case DRM_FORMAT_P010:
4998 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4999 		break;
5000 	case DRM_FORMAT_XRGB16161616F:
5001 	case DRM_FORMAT_ARGB16161616F:
5002 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5003 		break;
5004 	case DRM_FORMAT_XBGR16161616F:
5005 	case DRM_FORMAT_ABGR16161616F:
5006 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5007 		break;
5008 	case DRM_FORMAT_XRGB16161616:
5009 	case DRM_FORMAT_ARGB16161616:
5010 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5011 		break;
5012 	case DRM_FORMAT_XBGR16161616:
5013 	case DRM_FORMAT_ABGR16161616:
5014 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5015 		break;
5016 	default:
5017 		DRM_ERROR(
5018 			"Unsupported screen format %p4cc\n",
5019 			&fb->format->format);
5020 		return -EINVAL;
5021 	}
5022 
5023 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5024 	case DRM_MODE_ROTATE_0:
5025 		plane_info->rotation = ROTATION_ANGLE_0;
5026 		break;
5027 	case DRM_MODE_ROTATE_90:
5028 		plane_info->rotation = ROTATION_ANGLE_90;
5029 		break;
5030 	case DRM_MODE_ROTATE_180:
5031 		plane_info->rotation = ROTATION_ANGLE_180;
5032 		break;
5033 	case DRM_MODE_ROTATE_270:
5034 		plane_info->rotation = ROTATION_ANGLE_270;
5035 		break;
5036 	default:
5037 		plane_info->rotation = ROTATION_ANGLE_0;
5038 		break;
5039 	}
5040 
5041 	plane_info->visible = true;
5042 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5043 
5044 	plane_info->layer_index = 0;
5045 
5046 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5047 					  &plane_info->color_space);
5048 	if (ret)
5049 		return ret;
5050 
5051 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5052 					   plane_info->rotation, tiling_flags,
5053 					   &plane_info->tiling_info,
5054 					   &plane_info->plane_size,
5055 					   &plane_info->dcc, address, tmz_surface,
5056 					   force_disable_dcc);
5057 	if (ret)
5058 		return ret;
5059 
5060 	fill_blending_from_plane_state(
5061 		plane_state, &plane_info->per_pixel_alpha,
5062 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5063 
5064 	return 0;
5065 }
5066 
5067 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5068 				    struct dc_plane_state *dc_plane_state,
5069 				    struct drm_plane_state *plane_state,
5070 				    struct drm_crtc_state *crtc_state)
5071 {
5072 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5073 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5074 	struct dc_scaling_info scaling_info;
5075 	struct dc_plane_info plane_info;
5076 	int ret;
5077 	bool force_disable_dcc = false;
5078 
5079 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5080 	if (ret)
5081 		return ret;
5082 
5083 	dc_plane_state->src_rect = scaling_info.src_rect;
5084 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5085 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5086 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5087 
5088 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5089 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5090 					  afb->tiling_flags,
5091 					  &plane_info,
5092 					  &dc_plane_state->address,
5093 					  afb->tmz_surface,
5094 					  force_disable_dcc);
5095 	if (ret)
5096 		return ret;
5097 
5098 	dc_plane_state->format = plane_info.format;
5099 	dc_plane_state->color_space = plane_info.color_space;
5101 	dc_plane_state->plane_size = plane_info.plane_size;
5102 	dc_plane_state->rotation = plane_info.rotation;
5103 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5104 	dc_plane_state->stereo_format = plane_info.stereo_format;
5105 	dc_plane_state->tiling_info = plane_info.tiling_info;
5106 	dc_plane_state->visible = plane_info.visible;
5107 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5108 	dc_plane_state->global_alpha = plane_info.global_alpha;
5109 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5110 	dc_plane_state->dcc = plane_info.dcc;
5111 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5112 	dc_plane_state->flip_int_enabled = true;
5113 
5114 	/*
5115 	 * Always set input transfer function, since plane state is refreshed
5116 	 * every time.
5117 	 */
5118 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5119 	if (ret)
5120 		return ret;
5121 
5122 	return 0;
5123 }
5124 
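/*
 * Map the connector's scaling property onto the stream's src/dst rects:
 * RMX_ASPECT (and RMX_OFF) shrink one destination dimension to preserve
 * the aspect ratio, RMX_CENTER shows the source unscaled and centered,
 * and the underscan borders further shrink and offset the destination.
 */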
5125 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5126 					   const struct dm_connector_state *dm_state,
5127 					   struct dc_stream_state *stream)
5128 {
5129 	enum amdgpu_rmx_type rmx_type;
5130 
5131 	struct rect src = { 0 }; /* viewport in composition space*/
5132 	struct rect dst = { 0 }; /* stream addressable area */
5133 
5134 	/* no mode. nothing to be done */
5135 	if (!mode)
5136 		return;
5137 
5138 	/* Full screen scaling by default */
5139 	src.width = mode->hdisplay;
5140 	src.height = mode->vdisplay;
5141 	dst.width = stream->timing.h_addressable;
5142 	dst.height = stream->timing.v_addressable;
5143 
5144 	if (dm_state) {
5145 		rmx_type = dm_state->scaling;
5146 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5147 			if (src.width * dst.height <
5148 					src.height * dst.width) {
5149 				/* height needs less upscaling/more downscaling */
5150 				dst.width = src.width *
5151 						dst.height / src.height;
5152 			} else {
5153 				/* width needs less upscaling/more downscaling */
5154 				dst.height = src.height *
5155 						dst.width / src.width;
5156 			}
5157 		} else if (rmx_type == RMX_CENTER) {
5158 			dst = src;
5159 		}
5160 
5161 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5162 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5163 
5164 		if (dm_state->underscan_enable) {
5165 			dst.x += dm_state->underscan_hborder / 2;
5166 			dst.y += dm_state->underscan_vborder / 2;
5167 			dst.width -= dm_state->underscan_hborder;
5168 			dst.height -= dm_state->underscan_vborder;
5169 		}
5170 	}
5171 
5172 	stream->src = src;
5173 	stream->dst = dst;
5174 
5175 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5176 		      dst.x, dst.y, dst.width, dst.height);
5177 
5178 }
5179 
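/*
 * Derive the DC color depth for a stream from the connector's display
 * info: for YCbCr 4:2:0 the depth is capped by the HF-VSDB deep color
 * bits, otherwise the EDID bpc is used; the result is further capped by
 * the user's requested bpc and rounded down to an even value.
 */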
5180 static enum dc_color_depth
5181 convert_color_depth_from_display_info(const struct drm_connector *connector,
5182 				      bool is_y420, int requested_bpc)
5183 {
5184 	uint8_t bpc;
5185 
5186 	if (is_y420) {
5187 		bpc = 8;
5188 
5189 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5190 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5191 			bpc = 16;
5192 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5193 			bpc = 12;
5194 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5195 			bpc = 10;
5196 	} else {
5197 		bpc = (uint8_t)connector->display_info.bpc;
5198 		/* Assume 8 bpc by default if no bpc is specified. */
5199 		bpc = bpc ? bpc : 8;
5200 	}
5201 
5202 	if (requested_bpc > 0) {
5203 		/*
5204 		 * Cap display bpc based on the user requested value.
5205 		 *
5206 		 * The value for state->max_bpc may not be correctly updated
5207 		 * depending on when the connector gets added to the state
5208 		 * or if this was called outside of atomic check, so it
5209 		 * can't be used directly.
5210 		 */
5211 		bpc = min_t(u8, bpc, requested_bpc);
5212 
5213 		/* Round down to the nearest even number. */
5214 		bpc = bpc - (bpc & 1);
5215 	}
5216 
5217 	switch (bpc) {
5218 	case 0:
5219 		/*
5220 		 * Temporary Work around, DRM doesn't parse color depth for
5221 		 * EDID revision before 1.4
5222 		 * TODO: Fix edid parsing
5223 		 */
5224 		return COLOR_DEPTH_888;
5225 	case 6:
5226 		return COLOR_DEPTH_666;
5227 	case 8:
5228 		return COLOR_DEPTH_888;
5229 	case 10:
5230 		return COLOR_DEPTH_101010;
5231 	case 12:
5232 		return COLOR_DEPTH_121212;
5233 	case 14:
5234 		return COLOR_DEPTH_141414;
5235 	case 16:
5236 		return COLOR_DEPTH_161616;
5237 	default:
5238 		return COLOR_DEPTH_UNDEFINED;
5239 	}
5240 }
5241 
5242 static enum dc_aspect_ratio
5243 get_aspect_ratio(const struct drm_display_mode *mode_in)
5244 {
5245 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5246 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5247 }
5248 
5249 static enum dc_color_space
5250 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5251 {
5252 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5253 
5254 	switch (dc_crtc_timing->pixel_encoding)	{
5255 	case PIXEL_ENCODING_YCBCR422:
5256 	case PIXEL_ENCODING_YCBCR444:
5257 	case PIXEL_ENCODING_YCBCR420:
5258 	{
5259 		/*
5260 		 * 27.03 MHz is the separation point between HDTV and SDTV
5261 		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
5262 		 * respectively.
5263 		 */
5264 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5265 			if (dc_crtc_timing->flags.Y_ONLY)
5266 				color_space =
5267 					COLOR_SPACE_YCBCR709_LIMITED;
5268 			else
5269 				color_space = COLOR_SPACE_YCBCR709;
5270 		} else {
5271 			if (dc_crtc_timing->flags.Y_ONLY)
5272 				color_space =
5273 					COLOR_SPACE_YCBCR601_LIMITED;
5274 			else
5275 				color_space = COLOR_SPACE_YCBCR601;
5276 		}
5277 
5278 	}
5279 	break;
5280 	case PIXEL_ENCODING_RGB:
5281 		color_space = COLOR_SPACE_SRGB;
5282 		break;
5283 
5284 	default:
5285 		WARN_ON(1);
5286 		break;
5287 	}
5288 
5289 	return color_space;
5290 }
5291 
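/*
 * Walk the color depth down from the requested value until the deep
 * color adjusted pixel clock fits under the sink's max TMDS clock.
 * For example, 10 bpc scales the clock by 30/24 and YCbCr 4:2:0 halves
 * it first, so a 600 MHz 4:2:0 10 bpc mode is checked as 375 MHz.
 */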
5292 static bool adjust_colour_depth_from_display_info(
5293 	struct dc_crtc_timing *timing_out,
5294 	const struct drm_display_info *info)
5295 {
5296 	enum dc_color_depth depth = timing_out->display_color_depth;
5297 	int normalized_clk;
5298 	do {
5299 		normalized_clk = timing_out->pix_clk_100hz / 10;
5300 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5301 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5302 			normalized_clk /= 2;
5303 		/* Adjust the pixel clock per the HDMI spec based on colour depth */
5304 		switch (depth) {
5305 		case COLOR_DEPTH_888:
5306 			break;
5307 		case COLOR_DEPTH_101010:
5308 			normalized_clk = (normalized_clk * 30) / 24;
5309 			break;
5310 		case COLOR_DEPTH_121212:
5311 			normalized_clk = (normalized_clk * 36) / 24;
5312 			break;
5313 		case COLOR_DEPTH_161616:
5314 			normalized_clk = (normalized_clk * 48) / 24;
5315 			break;
5316 		default:
5317 			/* The above depths are the only ones valid for HDMI. */
5318 			return false;
5319 		}
5320 		if (normalized_clk <= info->max_tmds_clock) {
5321 			timing_out->display_color_depth = depth;
5322 			return true;
5323 		}
5324 	} while (--depth > COLOR_DEPTH_666);
5325 	return false;
5326 }
5327 
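/*
 * Fill the DC CRTC timing from a DRM display mode: pick the pixel
 * encoding (forcing YCbCr 4:2:0 where required), resolve the color
 * depth, copy VIC and sync polarities either from the old stream or
 * from the mode, and use the raw timings for freesync video modes
 * instead of the CRTC-adjusted ones.
 */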
5328 static void fill_stream_properties_from_drm_display_mode(
5329 	struct dc_stream_state *stream,
5330 	const struct drm_display_mode *mode_in,
5331 	const struct drm_connector *connector,
5332 	const struct drm_connector_state *connector_state,
5333 	const struct dc_stream_state *old_stream,
5334 	int requested_bpc)
5335 {
5336 	struct dc_crtc_timing *timing_out = &stream->timing;
5337 	const struct drm_display_info *info = &connector->display_info;
5338 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5339 	struct hdmi_vendor_infoframe hv_frame;
5340 	struct hdmi_avi_infoframe avi_frame;
5341 
5342 	memset(&hv_frame, 0, sizeof(hv_frame));
5343 	memset(&avi_frame, 0, sizeof(avi_frame));
5344 
5345 	timing_out->h_border_left = 0;
5346 	timing_out->h_border_right = 0;
5347 	timing_out->v_border_top = 0;
5348 	timing_out->v_border_bottom = 0;
5349 	/* TODO: un-hardcode */
5350 	if (drm_mode_is_420_only(info, mode_in)
5351 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5352 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5353 	else if (drm_mode_is_420_also(info, mode_in)
5354 			&& aconnector->force_yuv420_output)
5355 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5356 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5357 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5358 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5359 	else
5360 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5361 
5362 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5363 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5364 		connector,
5365 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5366 		requested_bpc);
5367 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5368 	timing_out->hdmi_vic = 0;
5369 
5370 	if (old_stream) {
5371 		timing_out->vic = old_stream->timing.vic;
5372 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5373 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5374 	} else {
5375 		timing_out->vic = drm_match_cea_mode(mode_in);
5376 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5377 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5378 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5379 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5380 	}
5381 
5382 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5383 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5384 		timing_out->vic = avi_frame.video_code;
5385 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5386 		timing_out->hdmi_vic = hv_frame.vic;
5387 	}
5388 
5389 	if (is_freesync_video_mode(mode_in, aconnector)) {
5390 		timing_out->h_addressable = mode_in->hdisplay;
5391 		timing_out->h_total = mode_in->htotal;
5392 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5393 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5394 		timing_out->v_total = mode_in->vtotal;
5395 		timing_out->v_addressable = mode_in->vdisplay;
5396 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5397 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5398 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5399 	} else {
5400 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5401 		timing_out->h_total = mode_in->crtc_htotal;
5402 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5403 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5404 		timing_out->v_total = mode_in->crtc_vtotal;
5405 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5406 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5407 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5408 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5409 	}
5410 
5411 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5412 
5413 	stream->output_color_space = get_output_color_space(timing_out);
5414 
5415 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5416 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5417 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5418 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5419 		    drm_mode_is_420_also(info, mode_in) &&
5420 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5421 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5422 			adjust_colour_depth_from_display_info(timing_out, info);
5423 		}
5424 	}
5425 }
5426 
5427 static void fill_audio_info(struct audio_info *audio_info,
5428 			    const struct drm_connector *drm_connector,
5429 			    const struct dc_sink *dc_sink)
5430 {
5431 	int i = 0;
5432 	int cea_revision = 0;
5433 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5434 
5435 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5436 	audio_info->product_id = edid_caps->product_id;
5437 
5438 	cea_revision = drm_connector->display_info.cea_rev;
5439 
5440 	strscpy(audio_info->display_name,
5441 		edid_caps->display_name,
5442 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5443 
5444 	if (cea_revision >= 3) {
5445 		audio_info->mode_count = edid_caps->audio_mode_count;
5446 
5447 		for (i = 0; i < audio_info->mode_count; ++i) {
5448 			audio_info->modes[i].format_code =
5449 					(enum audio_format_code)
5450 					(edid_caps->audio_modes[i].format_code);
5451 			audio_info->modes[i].channel_count =
5452 					edid_caps->audio_modes[i].channel_count;
5453 			audio_info->modes[i].sample_rates.all =
5454 					edid_caps->audio_modes[i].sample_rate;
5455 			audio_info->modes[i].sample_size =
5456 					edid_caps->audio_modes[i].sample_size;
5457 		}
5458 	}
5459 
5460 	audio_info->flags.all = edid_caps->speaker_flags;
5461 
5462 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5463 	if (drm_connector->latency_present[0]) {
5464 		audio_info->video_latency = drm_connector->video_latency[0];
5465 		audio_info->audio_latency = drm_connector->audio_latency[0];
5466 	}
5467 
5468 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5469 
5470 }
5471 
5472 static void
5473 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5474 				      struct drm_display_mode *dst_mode)
5475 {
5476 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5477 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5478 	dst_mode->crtc_clock = src_mode->crtc_clock;
5479 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5480 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5481 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5482 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5483 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5484 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5485 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5486 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5487 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5488 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5489 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5490 }
5491 
5492 static void
5493 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5494 					const struct drm_display_mode *native_mode,
5495 					bool scale_enabled)
5496 {
5497 	if (scale_enabled) {
5498 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5499 	} else if (native_mode->clock == drm_mode->clock &&
5500 			native_mode->htotal == drm_mode->htotal &&
5501 			native_mode->vtotal == drm_mode->vtotal) {
5502 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5503 	} else {
5504 		/* no scaling and no amdgpu-inserted mode, no need to patch */
5505 	}
5506 }
5507 
5508 static struct dc_sink *
5509 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5510 {
5511 	struct dc_sink_init_data sink_init_data = { 0 };
5512 	struct dc_sink *sink = NULL;
5513 	sink_init_data.link = aconnector->dc_link;
5514 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5515 
5516 	sink = dc_sink_create(&sink_init_data);
5517 	if (!sink) {
5518 		DRM_ERROR("Failed to create sink!\n");
5519 		return NULL;
5520 	}
5521 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5522 
5523 	return sink;
5524 }
5525 
5526 static void set_multisync_trigger_params(
5527 		struct dc_stream_state *stream)
5528 {
5529 	struct dc_stream_state *master = NULL;
5530 
5531 	if (stream->triggered_crtc_reset.enabled) {
5532 		master = stream->triggered_crtc_reset.event_source;
5533 		stream->triggered_crtc_reset.event =
5534 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5535 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5536 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5537 	}
5538 }
5539 
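/*
 * Pick the stream with the highest refresh rate as the multisync master
 * and point every other stream's triggered CRTC reset at it.
 */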
5540 static void set_master_stream(struct dc_stream_state *stream_set[],
5541 			      int stream_count)
5542 {
5543 	int j, highest_rfr = 0, master_stream = 0;
5544 
5545 	for (j = 0;  j < stream_count; j++) {
5546 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5547 			int refresh_rate = 0;
5548 
5549 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5550 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5551 			if (refresh_rate > highest_rfr) {
5552 				highest_rfr = refresh_rate;
5553 				master_stream = j;
5554 			}
5555 		}
5556 	}
5557 	for (j = 0;  j < stream_count; j++) {
5558 		if (stream_set[j])
5559 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5560 	}
5561 }
5562 
5563 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5564 {
5565 	int i = 0;
5566 	struct dc_stream_state *stream;
5567 
5568 	if (context->stream_count < 2)
5569 		return;
5570 	for (i = 0; i < context->stream_count ; i++) {
5571 		if (!context->streams[i])
5572 			continue;
5573 		/*
5574 		 * TODO: add a function to read AMD VSDB bits and set
5575 		 * crtc_sync_master.multi_sync_enabled flag.
5576 		 * For now it's set to false.
5577 		 */
5578 	}
5579 
5580 	set_master_stream(context->streams, context->stream_count);
5581 
5582 	for (i = 0; i < context->stream_count ; i++) {
5583 		stream = context->streams[i];
5584 
5585 		if (!stream)
5586 			continue;
5587 
5588 		set_multisync_trigger_params(stream);
5589 	}
5590 }
5591 
5592 #if defined(CONFIG_DRM_AMD_DC_DCN)
5593 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5594 							struct dc_sink *sink, struct dc_stream_state *stream,
5595 							struct dsc_dec_dpcd_caps *dsc_caps)
5596 {
5597 	stream->timing.flags.DSC = 0;
5598 
5599 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5600 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5601 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5602 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5603 				      dsc_caps);
5604 	}
5605 }
5606 
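/*
 * SST DSC policy: ask DC for a DSC config that fits the verified link
 * bandwidth, then let the debugfs overrides force DSC on and override
 * the slice counts and bits per pixel.
 */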
5607 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5608 										struct dc_sink *sink, struct dc_stream_state *stream,
5609 										struct dsc_dec_dpcd_caps *dsc_caps)
5610 {
5611 	struct drm_connector *drm_connector = &aconnector->base;
5612 	uint32_t link_bandwidth_kbps;
5613 
5614 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5615 							dc_link_get_link_cap(aconnector->dc_link));
5616 	/* Set DSC policy according to dsc_clock_en */
5617 	dc_dsc_policy_set_enable_dsc_when_not_needed(
5618 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5619 
5620 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5621 
5622 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5623 						dsc_caps,
5624 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5625 						0,
5626 						link_bandwidth_kbps,
5627 						&stream->timing,
5628 						&stream->timing.dsc_cfg)) {
5629 			stream->timing.flags.DSC = 1;
5630 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5631 		}
5632 	}
5633 
5634 	/* Overwrite the stream flag if DSC is enabled through debugfs */
5635 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5636 		stream->timing.flags.DSC = 1;
5637 
5638 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5639 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5640 
5641 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5642 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5643 
5644 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5645 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5646 }
5647 #endif
5648 
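/*
 * Return (and cache in freesync_vid_base) the mode with the highest
 * refresh rate at the preferred mode's resolution; this is the base
 * timing used for freesync video modes.
 */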
5649 static struct drm_display_mode *
5650 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5651 			  bool use_probed_modes)
5652 {
5653 	struct drm_display_mode *m, *m_pref = NULL;
5654 	u16 current_refresh, highest_refresh;
5655 	struct list_head *list_head = use_probed_modes ?
5656 						    &aconnector->base.probed_modes :
5657 						    &aconnector->base.modes;
5658 
5659 	if (aconnector->freesync_vid_base.clock != 0)
5660 		return &aconnector->freesync_vid_base;
5661 
5662 	/* Find the preferred mode */
5663 	list_for_each_entry (m, list_head, head) {
5664 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5665 			m_pref = m;
5666 			break;
5667 		}
5668 	}
5669 
5670 	if (!m_pref) {
5671 		/* Probably an EDID with no preferred mode. Fall back to the first entry */
5672 		m_pref = list_first_entry_or_null(
5673 			&aconnector->base.modes, struct drm_display_mode, head);
5674 		if (!m_pref) {
5675 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5676 			return NULL;
5677 		}
5678 	}
5679 
5680 	highest_refresh = drm_mode_vrefresh(m_pref);
5681 
5682 	/*
5683 	 * Find the mode with highest refresh rate with same resolution.
5684 	 * For some monitors, preferred mode is not the mode with highest
5685 	 * supported refresh rate.
5686 	 */
5687 	list_for_each_entry (m, list_head, head) {
5688 		current_refresh  = drm_mode_vrefresh(m);
5689 
5690 		if (m->hdisplay == m_pref->hdisplay &&
5691 		    m->vdisplay == m_pref->vdisplay &&
5692 		    highest_refresh < current_refresh) {
5693 			highest_refresh = current_refresh;
5694 			m_pref = m;
5695 		}
5696 	}
5697 
5698 	aconnector->freesync_vid_base = *m_pref;
5699 	return m_pref;
5700 }
5701 
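/*
 * A mode counts as a freesync video mode when it matches the cached
 * base mode in everything except the vertical blanking, i.e. only
 * vtotal and the vsync position differ, and by the same offset.
 */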
5702 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5703 				   struct amdgpu_dm_connector *aconnector)
5704 {
5705 	struct drm_display_mode *high_mode;
5706 	int timing_diff;
5707 
5708 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5709 	if (!high_mode || !mode)
5710 		return false;
5711 
5712 	timing_diff = high_mode->vtotal - mode->vtotal;
5713 
5714 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5715 	    high_mode->hdisplay != mode->hdisplay ||
5716 	    high_mode->vdisplay != mode->vdisplay ||
5717 	    high_mode->hsync_start != mode->hsync_start ||
5718 	    high_mode->hsync_end != mode->hsync_end ||
5719 	    high_mode->htotal != mode->htotal ||
5720 	    high_mode->hskew != mode->hskew ||
5721 	    high_mode->vscan != mode->vscan ||
5722 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5723 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5724 		return false;
5725 	else
5726 		return true;
5727 }
5728 
5729 static struct dc_stream_state *
5730 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5731 		       const struct drm_display_mode *drm_mode,
5732 		       const struct dm_connector_state *dm_state,
5733 		       const struct dc_stream_state *old_stream,
5734 		       int requested_bpc)
5735 {
5736 	struct drm_display_mode *preferred_mode = NULL;
5737 	struct drm_connector *drm_connector;
5738 	const struct drm_connector_state *con_state =
5739 		dm_state ? &dm_state->base : NULL;
5740 	struct dc_stream_state *stream = NULL;
5741 	struct drm_display_mode mode = *drm_mode;
5742 	struct drm_display_mode saved_mode;
5743 	struct drm_display_mode *freesync_mode = NULL;
5744 	bool native_mode_found = false;
5745 	bool recalculate_timing = false;
5746 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5747 	int mode_refresh;
5748 	int preferred_refresh = 0;
5749 #if defined(CONFIG_DRM_AMD_DC_DCN)
5750 	struct dsc_dec_dpcd_caps dsc_caps;
5751 #endif
5752 	struct dc_sink *sink = NULL;
5753 
5754 	memset(&saved_mode, 0, sizeof(saved_mode));
5755 
5756 	if (aconnector == NULL) {
5757 		DRM_ERROR("aconnector is NULL!\n");
5758 		return stream;
5759 	}
5760 
5761 	drm_connector = &aconnector->base;
5762 
5763 	if (!aconnector->dc_sink) {
5764 		sink = create_fake_sink(aconnector);
5765 		if (!sink)
5766 			return stream;
5767 	} else {
5768 		sink = aconnector->dc_sink;
5769 		dc_sink_retain(sink);
5770 	}
5771 
5772 	stream = dc_create_stream_for_sink(sink);
5773 
5774 	if (stream == NULL) {
5775 		DRM_ERROR("Failed to create stream for sink!\n");
5776 		goto finish;
5777 	}
5778 
5779 	stream->dm_stream_context = aconnector;
5780 
5781 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5782 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5783 
5784 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5785 		/* Search for preferred mode */
5786 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5787 			native_mode_found = true;
5788 			break;
5789 		}
5790 	}
5791 	if (!native_mode_found)
5792 		preferred_mode = list_first_entry_or_null(
5793 				&aconnector->base.modes,
5794 				struct drm_display_mode,
5795 				head);
5796 
5797 	mode_refresh = drm_mode_vrefresh(&mode);
5798 
5799 	if (preferred_mode == NULL) {
5800 		/*
5801 		 * This may not be an error; the use case is when we have no
5802 		 * usermode calls to reset and set mode upon hotplug. In this
5803 		 * case, we call set mode ourselves to restore the previous mode
5804 		 * and the mode list may not be filled in yet.
5805 		 */
5806 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5807 	} else {
5808 		recalculate_timing = amdgpu_freesync_vid_mode &&
5809 				 is_freesync_video_mode(&mode, aconnector);
5810 		if (recalculate_timing) {
5811 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5812 			saved_mode = mode;
5813 			mode = *freesync_mode;
5814 		} else {
5815 			decide_crtc_timing_for_drm_display_mode(
5816 				&mode, preferred_mode, scale);
5817 
5818 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
5819 		}
5820 	}
5821 
5822 	if (recalculate_timing)
5823 		drm_mode_set_crtcinfo(&saved_mode, 0);
5824 	else if (!dm_state)
5825 		drm_mode_set_crtcinfo(&mode, 0);
5826 
5827 	/*
5828 	 * If scaling is enabled and the refresh rate didn't change,
5829 	 * we copy the vic and polarities of the old timings.
5830 	 */
5831 	if (!scale || mode_refresh != preferred_refresh)
5832 		fill_stream_properties_from_drm_display_mode(
5833 			stream, &mode, &aconnector->base, con_state, NULL,
5834 			requested_bpc);
5835 	else
5836 		fill_stream_properties_from_drm_display_mode(
5837 			stream, &mode, &aconnector->base, con_state, old_stream,
5838 			requested_bpc);
5839 
5840 #if defined(CONFIG_DRM_AMD_DC_DCN)
5841 	/* SST DSC determination policy */
5842 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5843 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5844 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5845 #endif
5846 
5847 	update_stream_scaling_settings(&mode, dm_state, stream);
5848 
5849 	fill_audio_info(
5850 		&stream->audio_info,
5851 		drm_connector,
5852 		sink);
5853 
5854 	update_stream_signal(stream, sink);
5855 
5856 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5857 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5858 
5859 	if (stream->link->psr_settings.psr_feature_enabled) {
5860 		/*
5861 		 * Decide whether the stream supports the VSC SDP colorimetry
5862 		 * capability before building the VSC info packet.
5863 		 */
5864 		stream->use_vsc_sdp_for_colorimetry = false;
5865 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5866 			stream->use_vsc_sdp_for_colorimetry =
5867 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5868 		} else {
5869 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5870 				stream->use_vsc_sdp_for_colorimetry = true;
5871 		}
5872 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5873 	}
5874 finish:
5875 	dc_sink_release(sink);
5876 
5877 	return stream;
5878 }
5879 
5880 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5881 {
5882 	drm_crtc_cleanup(crtc);
5883 	kfree(crtc);
5884 }
5885 
5886 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5887 				  struct drm_crtc_state *state)
5888 {
5889 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5890 
5891 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
5892 	if (cur->stream)
5893 		dc_stream_release(cur->stream);
5894 
5895 
5896 	__drm_atomic_helper_crtc_destroy_state(state);
5897 
5898 
5899 	kfree(state);
5900 }
5901 
5902 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5903 {
5904 	struct dm_crtc_state *state;
5905 
5906 	if (crtc->state)
5907 		dm_crtc_destroy_state(crtc, crtc->state);
5908 
5909 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5910 	if (WARN_ON(!state))
5911 		return;
5912 
5913 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5914 }
5915 
5916 static struct drm_crtc_state *
5917 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5918 {
5919 	struct dm_crtc_state *state, *cur;
5920 
5921 	if (WARN_ON(!crtc->state))
5922 		return NULL;
5923 
5924 	cur = to_dm_crtc_state(crtc->state);
5925 
5926 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5927 	if (!state)
5928 		return NULL;
5929 
5930 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5931 
5932 	if (cur->stream) {
5933 		state->stream = cur->stream;
5934 		dc_stream_retain(state->stream);
5935 	}
5936 
5937 	state->active_planes = cur->active_planes;
5938 	state->vrr_infopacket = cur->vrr_infopacket;
5939 	state->abm_level = cur->abm_level;
5940 	state->vrr_supported = cur->vrr_supported;
5941 	state->freesync_config = cur->freesync_config;
5942 	state->cm_has_degamma = cur->cm_has_degamma;
5943 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5944 	/* TODO: Duplicate dc_stream once the stream object is flattened */
5945 
5946 	return &state->base;
5947 }
5948 
5949 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5950 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5951 {
5952 	crtc_debugfs_init(crtc);
5953 
5954 	return 0;
5955 }
5956 #endif
5957 
5958 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5959 {
5960 	enum dc_irq_source irq_source;
5961 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5962 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5963 	int rc;
5964 
5965 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5966 
5967 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5968 
5969 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5970 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
5971 	return rc;
5972 }
5973 
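/*
 * Enabling vblank also enables the vupdate interrupt when VRR is active;
 * on DCN the enable/disable request is additionally handed off to the
 * vblank workqueue (mall_work) so it runs outside the interrupt path.
 */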
5974 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5975 {
5976 	enum dc_irq_source irq_source;
5977 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5978 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5979 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5980 #if defined(CONFIG_DRM_AMD_DC_DCN)
5981 	struct amdgpu_display_manager *dm = &adev->dm;
5982 	unsigned long flags;
5983 #endif
5984 	int rc = 0;
5985 
5986 	if (enable) {
5987 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5988 		if (amdgpu_dm_vrr_active(acrtc_state))
5989 			rc = dm_set_vupdate_irq(crtc, true);
5990 	} else {
5991 		/* vblank irq off -> vupdate irq off */
5992 		rc = dm_set_vupdate_irq(crtc, false);
5993 	}
5994 
5995 	if (rc)
5996 		return rc;
5997 
5998 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5999 
6000 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6001 		return -EBUSY;
6002 
6003 	if (amdgpu_in_reset(adev))
6004 		return 0;
6005 
6006 #if defined(CONFIG_DRM_AMD_DC_DCN)
6007 	spin_lock_irqsave(&dm->vblank_lock, flags);
6008 	dm->vblank_workqueue->dm = dm;
6009 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
6010 	dm->vblank_workqueue->enable = enable;
6011 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
6012 	schedule_work(&dm->vblank_workqueue->mall_work);
6013 #endif
6014 
6015 	return 0;
6016 }
6017 
6018 static int dm_enable_vblank(struct drm_crtc *crtc)
6019 {
6020 	return dm_set_vblank(crtc, true);
6021 }
6022 
6023 static void dm_disable_vblank(struct drm_crtc *crtc)
6024 {
6025 	dm_set_vblank(crtc, false);
6026 }
6027 
6028 /* Implement only the options currently available to the driver */
6029 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6030 	.reset = dm_crtc_reset_state,
6031 	.destroy = amdgpu_dm_crtc_destroy,
6032 	.set_config = drm_atomic_helper_set_config,
6033 	.page_flip = drm_atomic_helper_page_flip,
6034 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6035 	.atomic_destroy_state = dm_crtc_destroy_state,
6036 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6037 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6038 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6039 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6040 	.enable_vblank = dm_enable_vblank,
6041 	.disable_vblank = dm_disable_vblank,
6042 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6043 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6044 	.late_register = amdgpu_dm_crtc_late_register,
6045 #endif
6046 };
6047 
6048 static enum drm_connector_status
6049 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6050 {
6051 	bool connected;
6052 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6053 
6054 	/*
6055 	 * Notes:
6056 	 * 1. This interface is NOT called in context of HPD irq.
6057 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6058 	 * makes it a bad place for *any* MST-related activity.
6059 	 */
6060 
6061 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6062 	    !aconnector->fake_enable)
6063 		connected = (aconnector->dc_sink != NULL);
6064 	else
6065 		connected = (aconnector->base.force == DRM_FORCE_ON);
6066 
6067 	update_subconnector_property(aconnector);
6068 
6069 	return (connected ? connector_status_connected :
6070 			connector_status_disconnected);
6071 }
6072 
6073 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6074 					    struct drm_connector_state *connector_state,
6075 					    struct drm_property *property,
6076 					    uint64_t val)
6077 {
6078 	struct drm_device *dev = connector->dev;
6079 	struct amdgpu_device *adev = drm_to_adev(dev);
6080 	struct dm_connector_state *dm_old_state =
6081 		to_dm_connector_state(connector->state);
6082 	struct dm_connector_state *dm_new_state =
6083 		to_dm_connector_state(connector_state);
6084 
6085 	int ret = -EINVAL;
6086 
6087 	if (property == dev->mode_config.scaling_mode_property) {
6088 		enum amdgpu_rmx_type rmx_type;
6089 
6090 		switch (val) {
6091 		case DRM_MODE_SCALE_CENTER:
6092 			rmx_type = RMX_CENTER;
6093 			break;
6094 		case DRM_MODE_SCALE_ASPECT:
6095 			rmx_type = RMX_ASPECT;
6096 			break;
6097 		case DRM_MODE_SCALE_FULLSCREEN:
6098 			rmx_type = RMX_FULL;
6099 			break;
6100 		case DRM_MODE_SCALE_NONE:
6101 		default:
6102 			rmx_type = RMX_OFF;
6103 			break;
6104 		}
6105 
6106 		if (dm_old_state->scaling == rmx_type)
6107 			return 0;
6108 
6109 		dm_new_state->scaling = rmx_type;
6110 		ret = 0;
6111 	} else if (property == adev->mode_info.underscan_hborder_property) {
6112 		dm_new_state->underscan_hborder = val;
6113 		ret = 0;
6114 	} else if (property == adev->mode_info.underscan_vborder_property) {
6115 		dm_new_state->underscan_vborder = val;
6116 		ret = 0;
6117 	} else if (property == adev->mode_info.underscan_property) {
6118 		dm_new_state->underscan_enable = val;
6119 		ret = 0;
6120 	} else if (property == adev->mode_info.abm_level_property) {
6121 		dm_new_state->abm_level = val;
6122 		ret = 0;
6123 	}
6124 
6125 	return ret;
6126 }
6127 
6128 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6129 					    const struct drm_connector_state *state,
6130 					    struct drm_property *property,
6131 					    uint64_t *val)
6132 {
6133 	struct drm_device *dev = connector->dev;
6134 	struct amdgpu_device *adev = drm_to_adev(dev);
6135 	struct dm_connector_state *dm_state =
6136 		to_dm_connector_state(state);
6137 	int ret = -EINVAL;
6138 
6139 	if (property == dev->mode_config.scaling_mode_property) {
6140 		switch (dm_state->scaling) {
6141 		case RMX_CENTER:
6142 			*val = DRM_MODE_SCALE_CENTER;
6143 			break;
6144 		case RMX_ASPECT:
6145 			*val = DRM_MODE_SCALE_ASPECT;
6146 			break;
6147 		case RMX_FULL:
6148 			*val = DRM_MODE_SCALE_FULLSCREEN;
6149 			break;
6150 		case RMX_OFF:
6151 		default:
6152 			*val = DRM_MODE_SCALE_NONE;
6153 			break;
6154 		}
6155 		ret = 0;
6156 	} else if (property == adev->mode_info.underscan_hborder_property) {
6157 		*val = dm_state->underscan_hborder;
6158 		ret = 0;
6159 	} else if (property == adev->mode_info.underscan_vborder_property) {
6160 		*val = dm_state->underscan_vborder;
6161 		ret = 0;
6162 	} else if (property == adev->mode_info.underscan_property) {
6163 		*val = dm_state->underscan_enable;
6164 		ret = 0;
6165 	} else if (property == adev->mode_info.abm_level_property) {
6166 		*val = dm_state->abm_level;
6167 		ret = 0;
6168 	}
6169 
6170 	return ret;
6171 }
6172 
6173 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6174 {
6175 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6176 
6177 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6178 }
6179 
6180 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6181 {
6182 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6183 	const struct dc_link *link = aconnector->dc_link;
6184 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6185 	struct amdgpu_display_manager *dm = &adev->dm;
6186 
6187 	/*
6188 	 * Call only if mst_mgr was initialized before since it's not done
6189 	 * for all connector types.
6190 	 */
6191 	if (aconnector->mst_mgr.dev)
6192 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6193 
6194 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6195 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6196 
6197 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6198 	    link->type != dc_connection_none &&
6199 	    dm->backlight_dev) {
6200 		backlight_device_unregister(dm->backlight_dev);
6201 		dm->backlight_dev = NULL;
6202 	}
6203 #endif
6204 
6205 	if (aconnector->dc_em_sink)
6206 		dc_sink_release(aconnector->dc_em_sink);
6207 	aconnector->dc_em_sink = NULL;
6208 	if (aconnector->dc_sink)
6209 		dc_sink_release(aconnector->dc_sink);
6210 	aconnector->dc_sink = NULL;
6211 
6212 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6213 	drm_connector_unregister(connector);
6214 	drm_connector_cleanup(connector);
6215 	if (aconnector->i2c) {
6216 		i2c_del_adapter(&aconnector->i2c->base);
6217 		kfree(aconnector->i2c);
6218 	}
6219 	kfree(aconnector->dm_dp_aux.aux.name);
6220 
6221 	kfree(connector);
6222 }
6223 
6224 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6225 {
6226 	struct dm_connector_state *state =
6227 		to_dm_connector_state(connector->state);
6228 
6229 	if (connector->state)
6230 		__drm_atomic_helper_connector_destroy_state(connector->state);
6231 
6232 	kfree(state);
6233 
6234 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6235 
6236 	if (state) {
6237 		state->scaling = RMX_OFF;
6238 		state->underscan_enable = false;
6239 		state->underscan_hborder = 0;
6240 		state->underscan_vborder = 0;
6241 		state->base.max_requested_bpc = 8;
6242 		state->vcpi_slots = 0;
6243 		state->pbn = 0;
6244 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6245 			state->abm_level = amdgpu_dm_abm_level;
6246 
6247 		__drm_atomic_helper_connector_reset(connector, &state->base);
6248 	}
6249 }
6250 
6251 struct drm_connector_state *
6252 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6253 {
6254 	struct dm_connector_state *state =
6255 		to_dm_connector_state(connector->state);
6256 
6257 	struct dm_connector_state *new_state =
6258 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6259 
6260 	if (!new_state)
6261 		return NULL;
6262 
6263 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6264 
6265 	new_state->freesync_capable = state->freesync_capable;
6266 	new_state->abm_level = state->abm_level;
6267 	new_state->scaling = state->scaling;
6268 	new_state->underscan_enable = state->underscan_enable;
6269 	new_state->underscan_hborder = state->underscan_hborder;
6270 	new_state->underscan_vborder = state->underscan_vborder;
6271 	new_state->vcpi_slots = state->vcpi_slots;
6272 	new_state->pbn = state->pbn;
6273 	return &new_state->base;
6274 }
6275 
6276 static int
6277 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6278 {
6279 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6280 		to_amdgpu_dm_connector(connector);
6281 	int r;
6282 
6283 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6284 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6285 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6286 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6287 		if (r)
6288 			return r;
6289 	}
6290 
6291 #if defined(CONFIG_DEBUG_FS)
6292 	connector_debugfs_init(amdgpu_dm_connector);
6293 #endif
6294 
6295 	return 0;
6296 }
6297 
6298 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6299 	.reset = amdgpu_dm_connector_funcs_reset,
6300 	.detect = amdgpu_dm_connector_detect,
6301 	.fill_modes = drm_helper_probe_single_connector_modes,
6302 	.destroy = amdgpu_dm_connector_destroy,
6303 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6304 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6305 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6306 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6307 	.late_register = amdgpu_dm_connector_late_register,
6308 	.early_unregister = amdgpu_dm_connector_unregister
6309 };
6310 
6311 static int get_modes(struct drm_connector *connector)
6312 {
6313 	return amdgpu_dm_connector_get_modes(connector);
6314 }
6315 
6316 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6317 {
6318 	struct dc_sink_init_data init_params = {
6319 			.link = aconnector->dc_link,
6320 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6321 	};
6322 	struct edid *edid;
6323 
6324 	if (!aconnector->base.edid_blob_ptr) {
6325 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6326 				aconnector->base.name);
6327 
6328 		aconnector->base.force = DRM_FORCE_OFF;
6329 		aconnector->base.override_edid = false;
6330 		return;
6331 	}
6332 
6333 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6334 
6335 	aconnector->edid = edid;
6336 
6337 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6338 		aconnector->dc_link,
6339 		(uint8_t *)edid,
6340 		(edid->extensions + 1) * EDID_LENGTH,
6341 		&init_params);
6342 
6343 	if (aconnector->base.force == DRM_FORCE_ON) {
6344 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6345 		aconnector->dc_link->local_sink :
6346 		aconnector->dc_em_sink;
6347 		dc_sink_retain(aconnector->dc_sink);
6348 	}
6349 }
6350 
6351 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6352 {
6353 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6354 
6355 	/*
6356 	 * In case of a headless boot with force on for a DP managed connector,
6357 	 * those settings have to be != 0 to get an initial modeset.
6358 	 */
6359 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6360 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6361 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6362 	}
6363 
6364 
6365 	aconnector->base.override_edid = true;
6366 	create_eml_sink(aconnector);
6367 }
6368 
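/*
 * Create a stream and validate it with DC, retrying with a lower bpc
 * (down to 6) on failure and finally retrying once more with YCbCr 4:2:0
 * forced if the encoder validation still fails.
 */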
6369 static struct dc_stream_state *
6370 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6371 				const struct drm_display_mode *drm_mode,
6372 				const struct dm_connector_state *dm_state,
6373 				const struct dc_stream_state *old_stream)
6374 {
6375 	struct drm_connector *connector = &aconnector->base;
6376 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6377 	struct dc_stream_state *stream;
6378 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6379 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6380 	enum dc_status dc_result = DC_OK;
6381 
6382 	do {
6383 		stream = create_stream_for_sink(aconnector, drm_mode,
6384 						dm_state, old_stream,
6385 						requested_bpc);
6386 		if (stream == NULL) {
6387 			DRM_ERROR("Failed to create stream for sink!\n");
6388 			break;
6389 		}
6390 
6391 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6392 
6393 		if (dc_result != DC_OK) {
6394 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6395 				      drm_mode->hdisplay,
6396 				      drm_mode->vdisplay,
6397 				      drm_mode->clock,
6398 				      dc_result,
6399 				      dc_status_to_str(dc_result));
6400 
6401 			dc_stream_release(stream);
6402 			stream = NULL;
6403 			requested_bpc -= 2; /* lower bpc to retry validation */
6404 		}
6405 
6406 	} while (stream == NULL && requested_bpc >= 6);
6407 
6408 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6409 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6410 
6411 		aconnector->force_yuv420_output = true;
6412 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6413 						dm_state, old_stream);
6414 		aconnector->force_yuv420_output = false;
6415 	}
6416 
6417 	return stream;
6418 }
6419 
6420 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6421 				   struct drm_display_mode *mode)
6422 {
6423 	int result = MODE_ERROR;
6424 	struct dc_sink *dc_sink;
6425 	/* TODO: Unhardcode stream count */
6426 	struct dc_stream_state *stream;
6427 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6428 
6429 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6430 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6431 		return result;
6432 
6433 	/*
6434 	 * Only run this the first time mode_valid is called to initialize
6435 	 * EDID mgmt
6436 	 */
6437 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6438 		!aconnector->dc_em_sink)
6439 		handle_edid_mgmt(aconnector);
6440 
6441 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6442 
6443 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6444 				aconnector->base.force != DRM_FORCE_ON) {
6445 		DRM_ERROR("dc_sink is NULL!\n");
6446 		goto fail;
6447 	}
6448 
6449 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6450 	if (stream) {
6451 		dc_stream_release(stream);
6452 		result = MODE_OK;
6453 	}
6454 
6455 fail:
6456 	/* TODO: error handling */
6457 	return result;
6458 }
6459 
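/*
 * Pack the connector's HDR output metadata into an HDMI DRM (HDR)
 * infoframe and wrap it for DC: an infoframe header for HDMI sinks or
 * an SDP header for DP/eDP sinks.
 */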
6460 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6461 				struct dc_info_packet *out)
6462 {
6463 	struct hdmi_drm_infoframe frame;
6464 	unsigned char buf[30]; /* 26 + 4 */
6465 	ssize_t len;
6466 	int ret, i;
6467 
6468 	memset(out, 0, sizeof(*out));
6469 
6470 	if (!state->hdr_output_metadata)
6471 		return 0;
6472 
6473 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6474 	if (ret)
6475 		return ret;
6476 
6477 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6478 	if (len < 0)
6479 		return (int)len;
6480 
6481 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6482 	if (len != 30)
6483 		return -EINVAL;
6484 
6485 	/* Prepare the infopacket for DC. */
6486 	switch (state->connector->connector_type) {
6487 	case DRM_MODE_CONNECTOR_HDMIA:
6488 		out->hb0 = 0x87; /* type */
6489 		out->hb1 = 0x01; /* version */
6490 		out->hb2 = 0x1A; /* length */
6491 		out->sb[0] = buf[3]; /* checksum */
6492 		i = 1;
6493 		break;
6494 
6495 	case DRM_MODE_CONNECTOR_DisplayPort:
6496 	case DRM_MODE_CONNECTOR_eDP:
6497 		out->hb0 = 0x00; /* sdp id, zero */
6498 		out->hb1 = 0x87; /* type */
6499 		out->hb2 = 0x1D; /* payload len - 1 */
6500 		out->hb3 = (0x13 << 2); /* sdp version */
6501 		out->sb[0] = 0x01; /* version */
6502 		out->sb[1] = 0x1A; /* length */
6503 		i = 2;
6504 		break;
6505 
6506 	default:
6507 		return -EINVAL;
6508 	}
6509 
6510 	memcpy(&out->sb[i], &buf[4], 26);
6511 	out->valid = true;
6512 
6513 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6514 		       sizeof(out->sb), false);
6515 
6516 	return 0;
6517 }
6518 
6519 static int
6520 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6521 				 struct drm_atomic_state *state)
6522 {
6523 	struct drm_connector_state *new_con_state =
6524 		drm_atomic_get_new_connector_state(state, conn);
6525 	struct drm_connector_state *old_con_state =
6526 		drm_atomic_get_old_connector_state(state, conn);
6527 	struct drm_crtc *crtc = new_con_state->crtc;
6528 	struct drm_crtc_state *new_crtc_state;
6529 	int ret;
6530 
6531 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6532 
6533 	if (!crtc)
6534 		return 0;
6535 
6536 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6537 		struct dc_info_packet hdr_infopacket;
6538 
6539 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6540 		if (ret)
6541 			return ret;
6542 
6543 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6544 		if (IS_ERR(new_crtc_state))
6545 			return PTR_ERR(new_crtc_state);
6546 
6547 		/*
6548 		 * DC considers the stream backends changed if the
6549 		 * static metadata changes. Forcing the modeset also
6550 		 * gives a simple way for userspace to switch from
6551 		 * 8bpc to 10bpc when setting the metadata to enter
6552 		 * or exit HDR.
6553 		 *
6554 		 * Changing the static metadata after it's been
6555 		 * set is permissible, however. So only force a
6556 		 * modeset if we're entering or exiting HDR.
6557 		 */
6558 		new_crtc_state->mode_changed =
6559 			!old_con_state->hdr_output_metadata ||
6560 			!new_con_state->hdr_output_metadata;
6561 	}
6562 
6563 	return 0;
6564 }
6565 
6566 static const struct drm_connector_helper_funcs
6567 amdgpu_dm_connector_helper_funcs = {
6568 	/*
6569 	 * If hotplugging a second, bigger display in FB Con mode, bigger resolution
6570 	 * modes will be filtered by drm_mode_validate_size(), and those modes
6571 	 * are missing after the user starts lightdm. So we need to renew the modes
6572 	 * list in the get_modes callback, not just return the modes count.
6573 	 */
6574 	.get_modes = get_modes,
6575 	.mode_valid = amdgpu_dm_connector_mode_valid,
6576 	.atomic_check = amdgpu_dm_connector_atomic_check,
6577 };
6578 
6579 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6580 {
6581 }
6582 
6583 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6584 {
6585 	struct drm_atomic_state *state = new_crtc_state->state;
6586 	struct drm_plane *plane;
6587 	int num_active = 0;
6588 
6589 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6590 		struct drm_plane_state *new_plane_state;
6591 
6592 		/* Cursor planes are "fake". */
6593 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6594 			continue;
6595 
6596 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6597 
6598 		if (!new_plane_state) {
6599 			/*
6600 			 * The plane is enabled on the CRTC and hasn't changed
6601 			 * state. This means that it previously passed
6602 			 * validation and is therefore enabled.
6603 			 */
6604 			num_active += 1;
6605 			continue;
6606 		}
6607 
6608 		/* We need a framebuffer to be considered enabled. */
6609 		num_active += (new_plane_state->fb != NULL);
6610 	}
6611 
6612 	return num_active;
6613 }
6614 
6615 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6616 					 struct drm_crtc_state *new_crtc_state)
6617 {
6618 	struct dm_crtc_state *dm_new_crtc_state =
6619 		to_dm_crtc_state(new_crtc_state);
6620 
6621 	dm_new_crtc_state->active_planes = 0;
6622 
6623 	if (!dm_new_crtc_state->stream)
6624 		return;
6625 
6626 	dm_new_crtc_state->active_planes =
6627 		count_crtc_active_planes(new_crtc_state);
6628 }
6629 
6630 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6631 				       struct drm_atomic_state *state)
6632 {
6633 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6634 									  crtc);
6635 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6636 	struct dc *dc = adev->dm.dc;
6637 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6638 	int ret = -EINVAL;
6639 
6640 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6641 
6642 	dm_update_crtc_active_planes(crtc, crtc_state);
6643 
6644 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6645 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
6646 		return ret;
6647 	}
6648 
6649 	/*
6650 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6651 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6652 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6653 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6654 	 */
6655 	if (crtc_state->enable &&
6656 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6657 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6658 		return -EINVAL;
6659 	}
6660 
6661 	/* In some use cases, like reset, no stream is attached */
6662 	if (!dm_crtc_state->stream)
6663 		return 0;
6664 
6665 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6666 		return 0;
6667 
6668 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6669 	return ret;
6670 }
6671 
6672 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6673 				      const struct drm_display_mode *mode,
6674 				      struct drm_display_mode *adjusted_mode)
6675 {
6676 	return true;
6677 }
6678 
6679 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6680 	.disable = dm_crtc_helper_disable,
6681 	.atomic_check = dm_crtc_helper_atomic_check,
6682 	.mode_fixup = dm_crtc_helper_mode_fixup,
6683 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6684 };
6685 
6686 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6687 {
6688 
6689 }
6690 
6691 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6692 {
6693 	switch (display_color_depth) {
6694 	case COLOR_DEPTH_666:
6695 		return 6;
6696 	case COLOR_DEPTH_888:
6697 		return 8;
6698 	case COLOR_DEPTH_101010:
6699 		return 10;
6700 	case COLOR_DEPTH_121212:
6701 		return 12;
6702 	case COLOR_DEPTH_141414:
6703 		return 14;
6704 	case COLOR_DEPTH_161616:
6705 		return 16;
6706 	default:
6707 		break;
6708 	}
6709 	return 0;
6710 }
6711 
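/*
 * For MST connectors, convert the adjusted mode's clock and effective
 * bpp into a PBN value and reserve the matching number of VCPI time
 * slots on the topology manager.
 */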
6712 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6713 					  struct drm_crtc_state *crtc_state,
6714 					  struct drm_connector_state *conn_state)
6715 {
6716 	struct drm_atomic_state *state = crtc_state->state;
6717 	struct drm_connector *connector = conn_state->connector;
6718 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6719 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6720 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6721 	struct drm_dp_mst_topology_mgr *mst_mgr;
6722 	struct drm_dp_mst_port *mst_port;
6723 	enum dc_color_depth color_depth;
6724 	int clock, bpp = 0;
6725 	bool is_y420 = false;
6726 
6727 	if (!aconnector->port || !aconnector->dc_sink)
6728 		return 0;
6729 
6730 	mst_port = aconnector->port;
6731 	mst_mgr = &aconnector->mst_port->mst_mgr;
6732 
6733 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6734 		return 0;
6735 
6736 	if (!state->duplicated) {
6737 		int max_bpc = conn_state->max_requested_bpc;
6738 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6739 				aconnector->force_yuv420_output;
6740 		color_depth = convert_color_depth_from_display_info(connector,
6741 								    is_y420,
6742 								    max_bpc);
6743 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6744 		clock = adjusted_mode->clock;
6745 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6746 	}
6747 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6748 									   mst_mgr,
6749 									   mst_port,
6750 									   dm_new_connector_state->pbn,
6751 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6752 	if (dm_new_connector_state->vcpi_slots < 0) {
6753 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6754 		return dm_new_connector_state->vcpi_slots;
6755 	}
6756 	return 0;
6757 }
6758 
6759 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6760 	.disable = dm_encoder_helper_disable,
6761 	.atomic_check = dm_encoder_helper_atomic_check
6762 };
6763 
6764 #if defined(CONFIG_DRM_AMD_DC_DCN)
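/*
 * Walk the new MST connector states: disable DSC on streams that do not
 * use it, and for DSC-enabled streams recompute the PBN from the DSC
 * target bpp and reserve the matching VCPI slots.
 */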
6765 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6766 					    struct dc_state *dc_state)
6767 {
6768 	struct dc_stream_state *stream = NULL;
6769 	struct drm_connector *connector;
6770 	struct drm_connector_state *new_con_state;
6771 	struct amdgpu_dm_connector *aconnector;
6772 	struct dm_connector_state *dm_conn_state;
6773 	int i, j, clock, bpp;
6774 	int vcpi, pbn_div, pbn = 0;
6775 
6776 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6777 
6778 		aconnector = to_amdgpu_dm_connector(connector);
6779 
6780 		if (!aconnector->port)
6781 			continue;
6782 
6783 		if (!new_con_state || !new_con_state->crtc)
6784 			continue;
6785 
6786 		dm_conn_state = to_dm_connector_state(new_con_state);
6787 
6788 		for (j = 0; j < dc_state->stream_count; j++) {
6789 			stream = dc_state->streams[j];
6790 			if (!stream)
6791 				continue;
6792 
6793 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6794 				break;
6795 
6796 			stream = NULL;
6797 		}
6798 
6799 		if (!stream)
6800 			continue;
6801 
6802 		if (stream->timing.flags.DSC != 1) {
6803 			drm_dp_mst_atomic_enable_dsc(state,
6804 						     aconnector->port,
6805 						     dm_conn_state->pbn,
6806 						     0,
6807 						     false);
6808 			continue;
6809 		}
6810 
6811 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6812 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6813 		clock = stream->timing.pix_clk_100hz / 10;
6814 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6815 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6816 						    aconnector->port,
6817 						    pbn, pbn_div,
6818 						    true);
6819 		if (vcpi < 0)
6820 			return vcpi;
6821 
6822 		dm_conn_state->pbn = pbn;
6823 		dm_conn_state->vcpi_slots = vcpi;
6824 	}
6825 	return 0;
6826 }
6827 #endif
6828 
6829 static void dm_drm_plane_reset(struct drm_plane *plane)
6830 {
6831 	struct dm_plane_state *amdgpu_state = NULL;
6832 
6833 	if (plane->state)
6834 		plane->funcs->atomic_destroy_state(plane, plane->state);
6835 
6836 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6837 	WARN_ON(amdgpu_state == NULL);
6838 
6839 	if (amdgpu_state)
6840 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6841 }
6842 
6843 static struct drm_plane_state *
6844 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6845 {
6846 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6847 
6848 	old_dm_plane_state = to_dm_plane_state(plane->state);
6849 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6850 	if (!dm_plane_state)
6851 		return NULL;
6852 
6853 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6854 
6855 	if (old_dm_plane_state->dc_state) {
6856 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6857 		dc_plane_state_retain(dm_plane_state->dc_state);
6858 	}
6859 
6860 	return &dm_plane_state->base;
6861 }
6862 
6863 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6864 				struct drm_plane_state *state)
6865 {
6866 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6867 
6868 	if (dm_plane_state->dc_state)
6869 		dc_plane_state_release(dm_plane_state->dc_state);
6870 
6871 	drm_atomic_helper_plane_destroy_state(plane, state);
6872 }
6873 
6874 static const struct drm_plane_funcs dm_plane_funcs = {
6875 	.update_plane	= drm_atomic_helper_update_plane,
6876 	.disable_plane	= drm_atomic_helper_disable_plane,
6877 	.destroy	= drm_primary_helper_destroy,
6878 	.reset = dm_drm_plane_reset,
6879 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6880 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6881 	.format_mod_supported = dm_plane_format_mod_supported,
6882 };
6883 
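/*
 * Pin the framebuffer BO in a displayable domain, map it into GART and,
 * for newly created planes, fill the DC buffer attributes from the
 * pinned address.
 */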
6884 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6885 				      struct drm_plane_state *new_state)
6886 {
6887 	struct amdgpu_framebuffer *afb;
6888 	struct drm_gem_object *obj;
6889 	struct amdgpu_device *adev;
6890 	struct amdgpu_bo *rbo;
6891 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6892 	struct list_head list;
6893 	struct ttm_validate_buffer tv;
6894 	struct ww_acquire_ctx ticket;
6895 	uint32_t domain;
6896 	int r;
6897 
6898 	if (!new_state->fb) {
6899 		DRM_DEBUG_KMS("No FB bound\n");
6900 		return 0;
6901 	}
6902 
6903 	afb = to_amdgpu_framebuffer(new_state->fb);
6904 	obj = new_state->fb->obj[0];
6905 	rbo = gem_to_amdgpu_bo(obj);
6906 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6907 	INIT_LIST_HEAD(&list);
6908 
6909 	tv.bo = &rbo->tbo;
6910 	tv.num_shared = 1;
6911 	list_add(&tv.head, &list);
6912 
6913 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6914 	if (r) {
6915 		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
6916 		return r;
6917 	}
6918 
6919 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6920 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6921 	else
6922 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6923 
6924 	r = amdgpu_bo_pin(rbo, domain);
6925 	if (unlikely(r != 0)) {
6926 		if (r != -ERESTARTSYS)
6927 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6928 		ttm_eu_backoff_reservation(&ticket, &list);
6929 		return r;
6930 	}
6931 
6932 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6933 	if (unlikely(r != 0)) {
6934 		amdgpu_bo_unpin(rbo);
6935 		ttm_eu_backoff_reservation(&ticket, &list);
6936 		DRM_ERROR("%p bind failed\n", rbo);
6937 		return r;
6938 	}
6939 
6940 	ttm_eu_backoff_reservation(&ticket, &list);
6941 
6942 	afb->address = amdgpu_bo_gpu_offset(rbo);
6943 
6944 	amdgpu_bo_ref(rbo);
6945 
6946 	/*
6947 	 * We don't do surface updates on planes that have been newly created,
6948 	 * but we also don't have the afb->address during atomic check.
6949 	 *
6950 	 * Fill in buffer attributes depending on the address here, but only on
6951 	 * newly created planes since they're not being used by DC yet and this
6952 	 * won't modify global state.
6953 	 */
6954 	dm_plane_state_old = to_dm_plane_state(plane->state);
6955 	dm_plane_state_new = to_dm_plane_state(new_state);
6956 
6957 	if (dm_plane_state_new->dc_state &&
6958 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6959 		struct dc_plane_state *plane_state =
6960 			dm_plane_state_new->dc_state;
6961 		bool force_disable_dcc = !plane_state->dcc.enable;
6962 
6963 		fill_plane_buffer_attributes(
6964 			adev, afb, plane_state->format, plane_state->rotation,
6965 			afb->tiling_flags,
6966 			&plane_state->tiling_info, &plane_state->plane_size,
6967 			&plane_state->dcc, &plane_state->address,
6968 			afb->tmz_surface, force_disable_dcc);
6969 	}
6970 
6971 	return 0;
6972 }
6973 
6974 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6975 				       struct drm_plane_state *old_state)
6976 {
6977 	struct amdgpu_bo *rbo;
6978 	int r;
6979 
6980 	if (!old_state->fb)
6981 		return;
6982 
6983 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6984 	r = amdgpu_bo_reserve(rbo, false);
6985 	if (unlikely(r)) {
6986 		DRM_ERROR("failed to reserve rbo before unpin\n");
6987 		return;
6988 	}
6989 
6990 	amdgpu_bo_unpin(rbo);
6991 	amdgpu_bo_unreserve(rbo);
6992 	amdgpu_bo_unref(&rbo);
6993 }
6994 
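/*
 * Validate the plane viewport against the CRTC mode and convert the DC
 * scaling caps into the 16.16 fixed-point scale limits expected by DRM.
 */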
6995 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6996 				       struct drm_crtc_state *new_crtc_state)
6997 {
6998 	struct drm_framebuffer *fb = state->fb;
6999 	int min_downscale, max_upscale;
7000 	int min_scale = 0;
7001 	int max_scale = INT_MAX;
7002 
7003 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7004 	if (fb && state->crtc) {
7005 		/* Validate viewport to cover the case when only the position changes */
7006 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7007 			int viewport_width = state->crtc_w;
7008 			int viewport_height = state->crtc_h;
7009 
7010 			if (state->crtc_x < 0)
7011 				viewport_width += state->crtc_x;
7012 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7013 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7014 
7015 			if (state->crtc_y < 0)
7016 				viewport_height += state->crtc_y;
7017 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7018 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7019 
7020 			if (viewport_width < 0 || viewport_height < 0) {
7021 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7022 				return -EINVAL;
7023 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7024 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7025 				return -EINVAL;
7026 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7027 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7028 				return -EINVAL;
7029 			}
7030 
7031 		}
7032 
7033 		/* Get min/max allowed scaling factors from plane caps. */
7034 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7035 					     &min_downscale, &max_upscale);
7036 		/*
7037 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7038 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7039 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7040 		 */
7041 		min_scale = (1000 << 16) / max_upscale;
7042 		max_scale = (1000 << 16) / min_downscale;
7043 	}
7044 
7045 	return drm_atomic_helper_check_plane_state(
7046 		state, new_crtc_state, min_scale, max_scale, true, true);
7047 }
7048 
7049 static int dm_plane_atomic_check(struct drm_plane *plane,
7050 				 struct drm_atomic_state *state)
7051 {
7052 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7053 										 plane);
7054 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7055 	struct dc *dc = adev->dm.dc;
7056 	struct dm_plane_state *dm_plane_state;
7057 	struct dc_scaling_info scaling_info;
7058 	struct drm_crtc_state *new_crtc_state;
7059 	int ret;
7060 
7061 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7062 
7063 	dm_plane_state = to_dm_plane_state(new_plane_state);
7064 
7065 	if (!dm_plane_state->dc_state)
7066 		return 0;
7067 
7068 	new_crtc_state =
7069 		drm_atomic_get_new_crtc_state(state,
7070 					      new_plane_state->crtc);
7071 	if (!new_crtc_state)
7072 		return -EINVAL;
7073 
7074 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7075 	if (ret)
7076 		return ret;
7077 
7078 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7079 	if (ret)
7080 		return ret;
7081 
7082 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7083 		return 0;
7084 
7085 	return -EINVAL;
7086 }
7087 
7088 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7089 				       struct drm_atomic_state *state)
7090 {
7091 	/* Only support async updates on cursor planes. */
7092 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7093 		return -EINVAL;
7094 
7095 	return 0;
7096 }
7097 
7098 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7099 					 struct drm_atomic_state *state)
7100 {
7101 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7102 									   plane);
7103 	struct drm_plane_state *old_state =
7104 		drm_atomic_get_old_plane_state(state, plane);
7105 
7106 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7107 
7108 	swap(plane->state->fb, new_state->fb);
7109 
7110 	plane->state->src_x = new_state->src_x;
7111 	plane->state->src_y = new_state->src_y;
7112 	plane->state->src_w = new_state->src_w;
7113 	plane->state->src_h = new_state->src_h;
7114 	plane->state->crtc_x = new_state->crtc_x;
7115 	plane->state->crtc_y = new_state->crtc_y;
7116 	plane->state->crtc_w = new_state->crtc_w;
7117 	plane->state->crtc_h = new_state->crtc_h;
7118 
7119 	handle_cursor_update(plane, old_state);
7120 }
7121 
7122 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7123 	.prepare_fb = dm_plane_helper_prepare_fb,
7124 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7125 	.atomic_check = dm_plane_atomic_check,
7126 	.atomic_async_check = dm_plane_atomic_async_check,
7127 	.atomic_async_update = dm_plane_atomic_async_update
7128 };
7129 
7130 /*
7131  * TODO: these are currently initialized to RGB formats only.
7132  * For future use cases we should either initialize them dynamically based on
7133  * plane capabilities, or initialize this array to all formats, so the internal
7134  * DRM check will succeed, and let DC implement the proper check.
7135  */
7136 static const uint32_t rgb_formats[] = {
7137 	DRM_FORMAT_XRGB8888,
7138 	DRM_FORMAT_ARGB8888,
7139 	DRM_FORMAT_RGBA8888,
7140 	DRM_FORMAT_XRGB2101010,
7141 	DRM_FORMAT_XBGR2101010,
7142 	DRM_FORMAT_ARGB2101010,
7143 	DRM_FORMAT_ABGR2101010,
7144 	DRM_FORMAT_XRGB16161616,
7145 	DRM_FORMAT_XBGR16161616,
7146 	DRM_FORMAT_ARGB16161616,
7147 	DRM_FORMAT_ABGR16161616,
7148 	DRM_FORMAT_XBGR8888,
7149 	DRM_FORMAT_ABGR8888,
7150 	DRM_FORMAT_RGB565,
7151 };
7152 
7153 static const uint32_t overlay_formats[] = {
7154 	DRM_FORMAT_XRGB8888,
7155 	DRM_FORMAT_ARGB8888,
7156 	DRM_FORMAT_RGBA8888,
7157 	DRM_FORMAT_XBGR8888,
7158 	DRM_FORMAT_ABGR8888,
7159 	DRM_FORMAT_RGB565
7160 };
7161 
7162 static const u32 cursor_formats[] = {
7163 	DRM_FORMAT_ARGB8888
7164 };
7165 
7166 static int get_plane_formats(const struct drm_plane *plane,
7167 			     const struct dc_plane_cap *plane_cap,
7168 			     uint32_t *formats, int max_formats)
7169 {
7170 	int i, num_formats = 0;
7171 
7172 	/*
7173 	 * TODO: Query support for each group of formats directly from
7174 	 * DC plane caps. This will require adding more formats to the
7175 	 * caps list.
7176 	 */
7177 
7178 	switch (plane->type) {
7179 	case DRM_PLANE_TYPE_PRIMARY:
7180 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7181 			if (num_formats >= max_formats)
7182 				break;
7183 
7184 			formats[num_formats++] = rgb_formats[i];
7185 		}
7186 
7187 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7188 			formats[num_formats++] = DRM_FORMAT_NV12;
7189 		if (plane_cap && plane_cap->pixel_format_support.p010)
7190 			formats[num_formats++] = DRM_FORMAT_P010;
7191 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7192 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7193 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7194 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7195 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7196 		}
7197 		break;
7198 
7199 	case DRM_PLANE_TYPE_OVERLAY:
7200 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7201 			if (num_formats >= max_formats)
7202 				break;
7203 
7204 			formats[num_formats++] = overlay_formats[i];
7205 		}
7206 		break;
7207 
7208 	case DRM_PLANE_TYPE_CURSOR:
7209 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7210 			if (num_formats >= max_formats)
7211 				break;
7212 
7213 			formats[num_formats++] = cursor_formats[i];
7214 		}
7215 		break;
7216 	}
7217 
7218 	return num_formats;
7219 }
7220 
7221 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7222 				struct drm_plane *plane,
7223 				unsigned long possible_crtcs,
7224 				const struct dc_plane_cap *plane_cap)
7225 {
7226 	uint32_t formats[32];
7227 	int num_formats;
7228 	int res = -EPERM;
7229 	unsigned int supported_rotations;
7230 	uint64_t *modifiers = NULL;
7231 
7232 	num_formats = get_plane_formats(plane, plane_cap, formats,
7233 					ARRAY_SIZE(formats));
7234 
7235 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7236 	if (res)
7237 		return res;
7238 
7239 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7240 				       &dm_plane_funcs, formats, num_formats,
7241 				       modifiers, plane->type, NULL);
7242 	kfree(modifiers);
7243 	if (res)
7244 		return res;
7245 
7246 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7247 	    plane_cap && plane_cap->per_pixel_alpha) {
7248 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7249 					  BIT(DRM_MODE_BLEND_PREMULTI);
7250 
7251 		drm_plane_create_alpha_property(plane);
7252 		drm_plane_create_blend_mode_property(plane, blend_caps);
7253 	}
7254 
7255 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7256 	    plane_cap &&
7257 	    (plane_cap->pixel_format_support.nv12 ||
7258 	     plane_cap->pixel_format_support.p010)) {
7259 		/* This only affects YUV formats. */
7260 		drm_plane_create_color_properties(
7261 			plane,
7262 			BIT(DRM_COLOR_YCBCR_BT601) |
7263 			BIT(DRM_COLOR_YCBCR_BT709) |
7264 			BIT(DRM_COLOR_YCBCR_BT2020),
7265 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7266 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7267 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7268 	}
7269 
7270 	supported_rotations =
7271 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7272 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7273 
7274 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7275 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7276 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7277 						   supported_rotations);
7278 
7279 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7280 
7281 	/* Create (reset) the plane state */
7282 	if (plane->funcs->reset)
7283 		plane->funcs->reset(plane);
7284 
7285 	return 0;
7286 }
7287 
7288 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7289 			       struct drm_plane *plane,
7290 			       uint32_t crtc_index)
7291 {
7292 	struct amdgpu_crtc *acrtc = NULL;
7293 	struct drm_plane *cursor_plane;
7294 
7295 	int res = -ENOMEM;
7296 
7297 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7298 	if (!cursor_plane)
7299 		goto fail;
7300 
7301 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7302 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
7303 
7304 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7305 	if (!acrtc)
7306 		goto fail;
7307 
7308 	res = drm_crtc_init_with_planes(
7309 			dm->ddev,
7310 			&acrtc->base,
7311 			plane,
7312 			cursor_plane,
7313 			&amdgpu_dm_crtc_funcs, NULL);
7314 
7315 	if (res)
7316 		goto fail;
7317 
7318 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7319 
7320 	/* Create (reset) the CRTC state */
7321 	if (acrtc->base.funcs->reset)
7322 		acrtc->base.funcs->reset(&acrtc->base);
7323 
7324 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7325 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7326 
7327 	acrtc->crtc_id = crtc_index;
7328 	acrtc->base.enabled = false;
7329 	acrtc->otg_inst = -1;
7330 
7331 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7332 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7333 				   true, MAX_COLOR_LUT_ENTRIES);
7334 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7335 
7336 	return 0;
7337 
7338 fail:
7339 	kfree(acrtc);
7340 	kfree(cursor_plane);
7341 	return res;
7342 }
7343 
7344 
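/* Map a DC signal type to the corresponding DRM connector type. */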
7345 static int to_drm_connector_type(enum signal_type st)
7346 {
7347 	switch (st) {
7348 	case SIGNAL_TYPE_HDMI_TYPE_A:
7349 		return DRM_MODE_CONNECTOR_HDMIA;
7350 	case SIGNAL_TYPE_EDP:
7351 		return DRM_MODE_CONNECTOR_eDP;
7352 	case SIGNAL_TYPE_LVDS:
7353 		return DRM_MODE_CONNECTOR_LVDS;
7354 	case SIGNAL_TYPE_RGB:
7355 		return DRM_MODE_CONNECTOR_VGA;
7356 	case SIGNAL_TYPE_DISPLAY_PORT:
7357 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7358 		return DRM_MODE_CONNECTOR_DisplayPort;
7359 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7360 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7361 		return DRM_MODE_CONNECTOR_DVID;
7362 	case SIGNAL_TYPE_VIRTUAL:
7363 		return DRM_MODE_CONNECTOR_VIRTUAL;
7364 
7365 	default:
7366 		return DRM_MODE_CONNECTOR_Unknown;
7367 	}
7368 }
7369 
7370 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7371 {
7372 	struct drm_encoder *encoder;
7373 
7374 	/* There is only one encoder per connector */
7375 	drm_connector_for_each_possible_encoder(connector, encoder)
7376 		return encoder;
7377 
7378 	return NULL;
7379 }
7380 
7381 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7382 {
7383 	struct drm_encoder *encoder;
7384 	struct amdgpu_encoder *amdgpu_encoder;
7385 
7386 	encoder = amdgpu_dm_connector_to_encoder(connector);
7387 
7388 	if (encoder == NULL)
7389 		return;
7390 
7391 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7392 
7393 	amdgpu_encoder->native_mode.clock = 0;
7394 
7395 	if (!list_empty(&connector->probed_modes)) {
7396 		struct drm_display_mode *preferred_mode = NULL;
7397 
7398 		list_for_each_entry(preferred_mode,
7399 				    &connector->probed_modes,
7400 				    head) {
7401 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7402 				amdgpu_encoder->native_mode = *preferred_mode;
7403 
7404 			break;
7405 		}
7406 
7407 	}
7408 }
7409 
7410 static struct drm_display_mode *
7411 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7412 			     char *name,
7413 			     int hdisplay, int vdisplay)
7414 {
7415 	struct drm_device *dev = encoder->dev;
7416 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7417 	struct drm_display_mode *mode = NULL;
7418 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7419 
7420 	mode = drm_mode_duplicate(dev, native_mode);
7421 
7422 	if (mode == NULL)
7423 		return NULL;
7424 
7425 	mode->hdisplay = hdisplay;
7426 	mode->vdisplay = vdisplay;
7427 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7428 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7429 
7430 	return mode;
7431 
7432 }
7433 
7434 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7435 						 struct drm_connector *connector)
7436 {
7437 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7438 	struct drm_display_mode *mode = NULL;
7439 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7440 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7441 				to_amdgpu_dm_connector(connector);
7442 	int i;
7443 	int n;
7444 	struct mode_size {
7445 		char name[DRM_DISPLAY_MODE_LEN];
7446 		int w;
7447 		int h;
7448 	} common_modes[] = {
7449 		{  "640x480",  640,  480},
7450 		{  "800x600",  800,  600},
7451 		{ "1024x768", 1024,  768},
7452 		{ "1280x720", 1280,  720},
7453 		{ "1280x800", 1280,  800},
7454 		{"1280x1024", 1280, 1024},
7455 		{ "1440x900", 1440,  900},
7456 		{"1680x1050", 1680, 1050},
7457 		{"1600x1200", 1600, 1200},
7458 		{"1920x1080", 1920, 1080},
7459 		{"1920x1200", 1920, 1200}
7460 	};
7461 
7462 	n = ARRAY_SIZE(common_modes);
7463 
7464 	for (i = 0; i < n; i++) {
7465 		struct drm_display_mode *curmode = NULL;
7466 		bool mode_existed = false;
7467 
7468 		if (common_modes[i].w > native_mode->hdisplay ||
7469 		    common_modes[i].h > native_mode->vdisplay ||
7470 		   (common_modes[i].w == native_mode->hdisplay &&
7471 		    common_modes[i].h == native_mode->vdisplay))
7472 			continue;
7473 
7474 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7475 			if (common_modes[i].w == curmode->hdisplay &&
7476 			    common_modes[i].h == curmode->vdisplay) {
7477 				mode_existed = true;
7478 				break;
7479 			}
7480 		}
7481 
7482 		if (mode_existed)
7483 			continue;
7484 
7485 		mode = amdgpu_dm_create_common_mode(encoder,
7486 				common_modes[i].name, common_modes[i].w,
7487 				common_modes[i].h);
7488 		drm_mode_probed_add(connector, mode);
7489 		amdgpu_dm_connector->num_modes++;
7490 	}
7491 }
7492 
7493 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7494 					      struct edid *edid)
7495 {
7496 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7497 			to_amdgpu_dm_connector(connector);
7498 
7499 	if (edid) {
7500 		/* empty probed_modes */
7501 		INIT_LIST_HEAD(&connector->probed_modes);
7502 		amdgpu_dm_connector->num_modes =
7503 				drm_add_edid_modes(connector, edid);
7504 
7505 		/* Sort the probed modes before calling
7506 		 * amdgpu_dm_get_native_mode(), since an EDID can have
7507 		 * more than one preferred mode. Modes later in the
7508 		 * probed mode list could have a higher preferred
7509 		 * resolution. For example, 3840x2160 in the base EDID
7510 		 * preferred timing and 4096x2160 as the preferred
7511 		 * resolution in a later DID extension block.
7512 		 */
7513 		drm_mode_sort(&connector->probed_modes);
7514 		amdgpu_dm_get_native_mode(connector);
7515 
7516 		/* Freesync capabilities are reset by calling
7517 		 * drm_add_edid_modes() and need to be
7518 		 * restored here.
7519 		 */
7520 		amdgpu_dm_update_freesync_caps(connector, edid);
7521 	} else {
7522 		amdgpu_dm_connector->num_modes = 0;
7523 	}
7524 }
7525 
7526 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7527 			      struct drm_display_mode *mode)
7528 {
7529 	struct drm_display_mode *m;
7530 
7531 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7532 		if (drm_mode_equal(m, mode))
7533 			return true;
7534 	}
7535 
7536 	return false;
7537 }
7538 
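/*
 * Add fixed-refresh variants of the highest refresh rate mode for the
 * common video frame rates that fall inside the connector's freesync
 * range, skipping duplicates of already probed modes.
 */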
7539 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7540 {
7541 	const struct drm_display_mode *m;
7542 	struct drm_display_mode *new_mode;
7543 	uint i;
7544 	uint32_t new_modes_count = 0;
7545 
7546 	/* Standard FPS values
7547 	 *
7548 	 * 23.976   - TV/NTSC
7549 	 * 24 	    - Cinema
7550 	 * 25 	    - TV/PAL
7551 	 * 29.97    - TV/NTSC
7552 	 * 30 	    - TV/NTSC
7553 	 * 48 	    - Cinema HFR
7554 	 * 50 	    - TV/PAL
7555 	 * 60 	    - Commonly used
7556 	 * 48,72,96 - Multiples of 24
7557 	 */
7558 	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7559 					 48000, 50000, 60000, 72000, 96000 };
7560 
7561 	/*
7562 	 * Find mode with highest refresh rate with the same resolution
7563 	 * as the preferred mode. Some monitors report a preferred mode
7564 	 * with lower resolution than the highest refresh rate supported.
7565 	 */
7566 
7567 	m = get_highest_refresh_rate_mode(aconnector, true);
7568 	if (!m)
7569 		return 0;
7570 
7571 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7572 		uint64_t target_vtotal, target_vtotal_diff;
7573 		uint64_t num, den;
7574 
7575 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7576 			continue;
7577 
7578 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7579 		    common_rates[i] > aconnector->max_vfreq * 1000)
7580 			continue;
7581 
7582 		num = (unsigned long long)m->clock * 1000 * 1000;
7583 		den = common_rates[i] * (unsigned long long)m->htotal;
7584 		target_vtotal = div_u64(num, den);
7585 		target_vtotal_diff = target_vtotal - m->vtotal;
7586 
7587 		/* Check for illegal modes */
7588 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7589 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7590 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7591 			continue;
7592 
7593 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7594 		if (!new_mode)
7595 			goto out;
7596 
7597 		new_mode->vtotal += (u16)target_vtotal_diff;
7598 		new_mode->vsync_start += (u16)target_vtotal_diff;
7599 		new_mode->vsync_end += (u16)target_vtotal_diff;
7600 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7601 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7602 
7603 		if (!is_duplicate_mode(aconnector, new_mode)) {
7604 			drm_mode_probed_add(&aconnector->base, new_mode);
7605 			new_modes_count += 1;
7606 		} else
7607 			drm_mode_destroy(aconnector->base.dev, new_mode);
7608 	}
7609  out:
7610 	return new_modes_count;
7611 }
7612 
7613 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7614 						   struct edid *edid)
7615 {
7616 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7617 		to_amdgpu_dm_connector(connector);
7618 
7619 	if (!(amdgpu_freesync_vid_mode && edid))
7620 		return;
7621 
7622 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7623 		amdgpu_dm_connector->num_modes +=
7624 			add_fs_modes(amdgpu_dm_connector);
7625 }
7626 
7627 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7628 {
7629 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7630 			to_amdgpu_dm_connector(connector);
7631 	struct drm_encoder *encoder;
7632 	struct edid *edid = amdgpu_dm_connector->edid;
7633 
7634 	encoder = amdgpu_dm_connector_to_encoder(connector);
7635 
7636 	if (!drm_edid_is_valid(edid)) {
7637 		amdgpu_dm_connector->num_modes =
7638 				drm_add_modes_noedid(connector, 640, 480);
7639 	} else {
7640 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7641 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7642 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7643 	}
7644 	amdgpu_dm_fbc_init(connector);
7645 
7646 	return amdgpu_dm_connector->num_modes;
7647 }
7648 
7649 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7650 				     struct amdgpu_dm_connector *aconnector,
7651 				     int connector_type,
7652 				     struct dc_link *link,
7653 				     int link_index)
7654 {
7655 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7656 
7657 	/*
7658 	 * Some of the properties below require access to state, like bpc.
7659 	 * Allocate some default initial connector state with our reset helper.
7660 	 */
7661 	if (aconnector->base.funcs->reset)
7662 		aconnector->base.funcs->reset(&aconnector->base);
7663 
7664 	aconnector->connector_id = link_index;
7665 	aconnector->dc_link = link;
7666 	aconnector->base.interlace_allowed = false;
7667 	aconnector->base.doublescan_allowed = false;
7668 	aconnector->base.stereo_allowed = false;
7669 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7670 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7671 	aconnector->audio_inst = -1;
7672 	mutex_init(&aconnector->hpd_lock);
7673 
7674 	/*
7675 	 * Configure HPD hot plug support. The default value of connector->polled
7676 	 * is 0, which means HPD hot plug is not supported.
7677 	 */
7678 	switch (connector_type) {
7679 	case DRM_MODE_CONNECTOR_HDMIA:
7680 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7681 		aconnector->base.ycbcr_420_allowed =
7682 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7683 		break;
7684 	case DRM_MODE_CONNECTOR_DisplayPort:
7685 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7686 		aconnector->base.ycbcr_420_allowed =
7687 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
7688 		break;
7689 	case DRM_MODE_CONNECTOR_DVID:
7690 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7691 		break;
7692 	default:
7693 		break;
7694 	}
7695 
7696 	drm_object_attach_property(&aconnector->base.base,
7697 				dm->ddev->mode_config.scaling_mode_property,
7698 				DRM_MODE_SCALE_NONE);
7699 
7700 	drm_object_attach_property(&aconnector->base.base,
7701 				adev->mode_info.underscan_property,
7702 				UNDERSCAN_OFF);
7703 	drm_object_attach_property(&aconnector->base.base,
7704 				adev->mode_info.underscan_hborder_property,
7705 				0);
7706 	drm_object_attach_property(&aconnector->base.base,
7707 				adev->mode_info.underscan_vborder_property,
7708 				0);
7709 
7710 	if (!aconnector->mst_port)
7711 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7712 
7713 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7714 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7715 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7716 
7717 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7718 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7719 		drm_object_attach_property(&aconnector->base.base,
7720 				adev->mode_info.abm_level_property, 0);
7721 	}
7722 
7723 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7724 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7725 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7726 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7727 
7728 		if (!aconnector->mst_port)
7729 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7730 
7731 #ifdef CONFIG_DRM_AMD_DC_HDCP
7732 		if (adev->dm.hdcp_workqueue)
7733 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7734 #endif
7735 	}
7736 }
7737 
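/*
 * Translate an array of i2c_msg transfers into a DC i2c_command and
 * submit it over the connector's DDC channel.
 */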
7738 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7739 			      struct i2c_msg *msgs, int num)
7740 {
7741 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7742 	struct ddc_service *ddc_service = i2c->ddc_service;
7743 	struct i2c_command cmd;
7744 	int i;
7745 	int result = -EIO;
7746 
7747 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7748 
7749 	if (!cmd.payloads)
7750 		return result;
7751 
7752 	cmd.number_of_payloads = num;
7753 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7754 	cmd.speed = 100;
7755 
7756 	for (i = 0; i < num; i++) {
7757 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7758 		cmd.payloads[i].address = msgs[i].addr;
7759 		cmd.payloads[i].length = msgs[i].len;
7760 		cmd.payloads[i].data = msgs[i].buf;
7761 	}
7762 
7763 	if (dc_submit_i2c(
7764 			ddc_service->ctx->dc,
7765 			ddc_service->ddc_pin->hw_info.ddc_channel,
7766 			&cmd))
7767 		result = num;
7768 
7769 	kfree(cmd.payloads);
7770 	return result;
7771 }
7772 
7773 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7774 {
7775 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7776 }
7777 
7778 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7779 	.master_xfer = amdgpu_dm_i2c_xfer,
7780 	.functionality = amdgpu_dm_i2c_func,
7781 };
7782 
7783 static struct amdgpu_i2c_adapter *
7784 create_i2c(struct ddc_service *ddc_service,
7785 	   int link_index,
7786 	   int *res)
7787 {
7788 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7789 	struct amdgpu_i2c_adapter *i2c;
7790 
7791 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7792 	if (!i2c)
7793 		return NULL;
7794 	i2c->base.owner = THIS_MODULE;
7795 	i2c->base.class = I2C_CLASS_DDC;
7796 	i2c->base.dev.parent = &adev->pdev->dev;
7797 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7798 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7799 	i2c_set_adapdata(&i2c->base, i2c);
7800 	i2c->ddc_service = ddc_service;
7801 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7802 
7803 	return i2c;
7804 }
7805 
7806 
7807 /*
7808  * Note: this function assumes that dc_link_detect() was called for the
7809  * dc_link which will be represented by this aconnector.
7810  */
7811 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7812 				    struct amdgpu_dm_connector *aconnector,
7813 				    uint32_t link_index,
7814 				    struct amdgpu_encoder *aencoder)
7815 {
7816 	int res = 0;
7817 	int connector_type;
7818 	struct dc *dc = dm->dc;
7819 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7820 	struct amdgpu_i2c_adapter *i2c;
7821 
7822 	link->priv = aconnector;
7823 
7824 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7825 
7826 	i2c = create_i2c(link->ddc, link->link_index, &res);
7827 	if (!i2c) {
7828 		DRM_ERROR("Failed to create i2c adapter data\n");
7829 		return -ENOMEM;
7830 	}
7831 
7832 	aconnector->i2c = i2c;
7833 	res = i2c_add_adapter(&i2c->base);
7834 
7835 	if (res) {
7836 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7837 		goto out_free;
7838 	}
7839 
7840 	connector_type = to_drm_connector_type(link->connector_signal);
7841 
7842 	res = drm_connector_init_with_ddc(
7843 			dm->ddev,
7844 			&aconnector->base,
7845 			&amdgpu_dm_connector_funcs,
7846 			connector_type,
7847 			&i2c->base);
7848 
7849 	if (res) {
7850 		DRM_ERROR("connector_init failed\n");
7851 		aconnector->connector_id = -1;
7852 		goto out_free;
7853 	}
7854 
7855 	drm_connector_helper_add(
7856 			&aconnector->base,
7857 			&amdgpu_dm_connector_helper_funcs);
7858 
7859 	amdgpu_dm_connector_init_helper(
7860 		dm,
7861 		aconnector,
7862 		connector_type,
7863 		link,
7864 		link_index);
7865 
7866 	drm_connector_attach_encoder(
7867 		&aconnector->base, &aencoder->base);
7868 
7869 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7870 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7871 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7872 
7873 out_free:
7874 	if (res) {
7875 		kfree(i2c);
7876 		aconnector->i2c = NULL;
7877 	}
7878 	return res;
7879 }
7880 
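/* Build a possible_crtcs bitmask covering every CRTC on the device. */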
7881 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7882 {
7883 	switch (adev->mode_info.num_crtc) {
7884 	case 1:
7885 		return 0x1;
7886 	case 2:
7887 		return 0x3;
7888 	case 3:
7889 		return 0x7;
7890 	case 4:
7891 		return 0xf;
7892 	case 5:
7893 		return 0x1f;
7894 	case 6:
7895 	default:
7896 		return 0x3f;
7897 	}
7898 }
7899 
7900 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7901 				  struct amdgpu_encoder *aencoder,
7902 				  uint32_t link_index)
7903 {
7904 	struct amdgpu_device *adev = drm_to_adev(dev);
7905 
7906 	int res = drm_encoder_init(dev,
7907 				   &aencoder->base,
7908 				   &amdgpu_dm_encoder_funcs,
7909 				   DRM_MODE_ENCODER_TMDS,
7910 				   NULL);
7911 
7912 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7913 
7914 	if (!res)
7915 		aencoder->encoder_id = link_index;
7916 	else
7917 		aencoder->encoder_id = -1;
7918 
7919 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7920 
7921 	return res;
7922 }
7923 
7924 static void manage_dm_interrupts(struct amdgpu_device *adev,
7925 				 struct amdgpu_crtc *acrtc,
7926 				 bool enable)
7927 {
7928 	/*
7929 	 * We have no guarantee that the frontend index maps to the same
7930 	 * backend index - some even map to more than one.
7931 	 *
7932 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7933 	 */
7934 	int irq_type =
7935 		amdgpu_display_crtc_idx_to_irq_type(
7936 			adev,
7937 			acrtc->crtc_id);
7938 
7939 	if (enable) {
7940 		drm_crtc_vblank_on(&acrtc->base);
7941 		amdgpu_irq_get(
7942 			adev,
7943 			&adev->pageflip_irq,
7944 			irq_type);
7945 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7946 		amdgpu_irq_get(
7947 			adev,
7948 			&adev->vline0_irq,
7949 			irq_type);
7950 #endif
7951 	} else {
7952 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7953 		amdgpu_irq_put(
7954 			adev,
7955 			&adev->vline0_irq,
7956 			irq_type);
7957 #endif
7958 		amdgpu_irq_put(
7959 			adev,
7960 			&adev->pageflip_irq,
7961 			irq_type);
7962 		drm_crtc_vblank_off(&acrtc->base);
7963 	}
7964 }
7965 
7966 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7967 				      struct amdgpu_crtc *acrtc)
7968 {
7969 	int irq_type =
7970 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7971 
7972 	/*
7973 	 * This reads the current state for the IRQ and forcibly reapplies
7974 	 * the setting to hardware.
7975 	 */
7976 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7977 }
7978 
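/*
 * Report whether the scaling mode or underscan settings changed between
 * the old and new connector states.
 */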
7979 static bool
7980 is_scaling_state_different(const struct dm_connector_state *dm_state,
7981 			   const struct dm_connector_state *old_dm_state)
7982 {
7983 	if (dm_state->scaling != old_dm_state->scaling)
7984 		return true;
7985 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7986 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7987 			return true;
7988 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7989 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7990 			return true;
7991 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7992 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7993 		return true;
7994 	return false;
7995 }
7996 
7997 #ifdef CONFIG_DRM_AMD_DC_HDCP
7998 static bool is_content_protection_different(struct drm_connector_state *state,
7999 					    const struct drm_connector_state *old_state,
8000 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8001 {
8002 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8003 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8004 
8005 	/* Handle: Type0/1 change */
8006 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8007 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8008 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8009 		return true;
8010 	}
8011 
8012 	/* CP is being re-enabled, ignore this
8013 	 *
8014 	 * Handles:	ENABLED -> DESIRED
8015 	 */
8016 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8017 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8018 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8019 		return false;
8020 	}
8021 
8022 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8023 	 *
8024 	 * Handles:	UNDESIRED -> ENABLED
8025 	 */
8026 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8027 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8028 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8029 
8030 	/* Check if something is connected/enabled; otherwise we would start HDCP
8031 	 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
8032 	 *
8033 	 * Handles:	DESIRED -> DESIRED (Special case)
8034 	 */
8035 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8036 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8037 		dm_con_state->update_hdcp = false;
8038 		return true;
8039 	}
8040 
8041 	/*
8042 	 * Handles:	UNDESIRED -> UNDESIRED
8043 	 *		DESIRED -> DESIRED
8044 	 *		ENABLED -> ENABLED
8045 	 */
8046 	if (old_state->content_protection == state->content_protection)
8047 		return false;
8048 
8049 	/*
8050 	 * Handles:	UNDESIRED -> DESIRED
8051 	 *		DESIRED -> UNDESIRED
8052 	 *		ENABLED -> UNDESIRED
8053 	 */
8054 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8055 		return true;
8056 
8057 	/*
8058 	 * Handles:	DESIRED -> ENABLED
8059 	 */
8060 	return false;
8061 }
8062 
8063 #endif
8064 static void remove_stream(struct amdgpu_device *adev,
8065 			  struct amdgpu_crtc *acrtc,
8066 			  struct dc_stream_state *stream)
8067 {
8068 	/* this is the update mode case */
8069 
8070 	acrtc->otg_inst = -1;
8071 	acrtc->enabled = false;
8072 }
8073 
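/*
 * Compute the DC cursor position from the cursor plane state, turning
 * negative coordinates into hotspot offsets and rejecting cursors that
 * exceed the maximum supported size.
 */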
8074 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8075 			       struct dc_cursor_position *position)
8076 {
8077 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8078 	int x, y;
8079 	int xorigin = 0, yorigin = 0;
8080 
8081 	if (!crtc || !plane->state->fb)
8082 		return 0;
8083 
8084 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8085 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8086 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8087 			  __func__,
8088 			  plane->state->crtc_w,
8089 			  plane->state->crtc_h);
8090 		return -EINVAL;
8091 	}
8092 
8093 	x = plane->state->crtc_x;
8094 	y = plane->state->crtc_y;
8095 
8096 	if (x <= -amdgpu_crtc->max_cursor_width ||
8097 	    y <= -amdgpu_crtc->max_cursor_height)
8098 		return 0;
8099 
8100 	if (x < 0) {
8101 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8102 		x = 0;
8103 	}
8104 	if (y < 0) {
8105 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8106 		y = 0;
8107 	}
8108 	position->enable = true;
8109 	position->translate_by_source = true;
8110 	position->x = x;
8111 	position->y = y;
8112 	position->x_hotspot = xorigin;
8113 	position->y_hotspot = yorigin;
8114 
8115 	return 0;
8116 }
8117 
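/*
 * Program the DC cursor attributes and position for a cursor plane
 * update, or disable the cursor when it is off screen or has no FB.
 */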
8118 static void handle_cursor_update(struct drm_plane *plane,
8119 				 struct drm_plane_state *old_plane_state)
8120 {
8121 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8122 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8123 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8124 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8125 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8126 	uint64_t address = afb ? afb->address : 0;
8127 	struct dc_cursor_position position = {0};
8128 	struct dc_cursor_attributes attributes;
8129 	int ret;
8130 
8131 	if (!plane->state->fb && !old_plane_state->fb)
8132 		return;
8133 
8134 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d x %d\n",
8135 		      __func__,
8136 		      amdgpu_crtc->crtc_id,
8137 		      plane->state->crtc_w,
8138 		      plane->state->crtc_h);
8139 
8140 	ret = get_cursor_position(plane, crtc, &position);
8141 	if (ret)
8142 		return;
8143 
8144 	if (!position.enable) {
8145 		/* turn off cursor */
8146 		if (crtc_state && crtc_state->stream) {
8147 			mutex_lock(&adev->dm.dc_lock);
8148 			dc_stream_set_cursor_position(crtc_state->stream,
8149 						      &position);
8150 			mutex_unlock(&adev->dm.dc_lock);
8151 		}
8152 		return;
8153 	}
8154 
8155 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8156 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8157 
8158 	memset(&attributes, 0, sizeof(attributes));
8159 	attributes.address.high_part = upper_32_bits(address);
8160 	attributes.address.low_part  = lower_32_bits(address);
8161 	attributes.width             = plane->state->crtc_w;
8162 	attributes.height            = plane->state->crtc_h;
8163 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8164 	attributes.rotation_angle    = 0;
8165 	attributes.attribute_flags.value = 0;
8166 
8167 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8168 
8169 	if (crtc_state->stream) {
8170 		mutex_lock(&adev->dm.dc_lock);
8171 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8172 							 &attributes))
8173 			DRM_ERROR("DC failed to set cursor attributes\n");
8174 
8175 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8176 						   &position))
8177 			DRM_ERROR("DC failed to set cursor position\n");
8178 		mutex_unlock(&adev->dm.dc_lock);
8179 	}
8180 }
8181 
8182 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8183 {
8184 
8185 	assert_spin_locked(&acrtc->base.dev->event_lock);
8186 	WARN_ON(acrtc->event);
8187 
8188 	acrtc->event = acrtc->base.state->event;
8189 
8190 	/* Set the flip status */
8191 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8192 
8193 	/* Mark this event as consumed */
8194 	acrtc->base.state->event = NULL;
8195 
8196 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8197 		     acrtc->crtc_id);
8198 }
8199 
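/*
 * Rebuild the VRR parameters and VRR infopacket for a stream on page
 * flip, and note whether the freesync timing or infopacket changed.
 */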
8200 static void update_freesync_state_on_stream(
8201 	struct amdgpu_display_manager *dm,
8202 	struct dm_crtc_state *new_crtc_state,
8203 	struct dc_stream_state *new_stream,
8204 	struct dc_plane_state *surface,
8205 	u32 flip_timestamp_in_us)
8206 {
8207 	struct mod_vrr_params vrr_params;
8208 	struct dc_info_packet vrr_infopacket = {0};
8209 	struct amdgpu_device *adev = dm->adev;
8210 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8211 	unsigned long flags;
8212 	bool pack_sdp_v1_3 = false;
8213 
8214 	if (!new_stream)
8215 		return;
8216 
8217 	/*
8218 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8219 	 * For now it's sufficient to just guard against these conditions.
8220 	 */
8221 
8222 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8223 		return;
8224 
8225 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8226         vrr_params = acrtc->dm_irq_params.vrr_params;
8227 	vrr_params = acrtc->dm_irq_params.vrr_params;
8228 	if (surface) {
8229 		mod_freesync_handle_preflip(
8230 			dm->freesync_module,
8231 			surface,
8232 			new_stream,
8233 			flip_timestamp_in_us,
8234 			&vrr_params);
8235 
8236 		if (adev->family < AMDGPU_FAMILY_AI &&
8237 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8238 			mod_freesync_handle_v_update(dm->freesync_module,
8239 						     new_stream, &vrr_params);
8240 
8241 			/* Need to call this before the frame ends. */
8242 			dc_stream_adjust_vmin_vmax(dm->dc,
8243 						   new_crtc_state->stream,
8244 						   &vrr_params.adjust);
8245 		}
8246 	}
8247 
8248 	mod_freesync_build_vrr_infopacket(
8249 		dm->freesync_module,
8250 		new_stream,
8251 		&vrr_params,
8252 		PACKET_TYPE_VRR,
8253 		TRANSFER_FUNC_UNKNOWN,
8254 		&vrr_infopacket,
8255 		pack_sdp_v1_3);
8256 
8257 	new_crtc_state->freesync_timing_changed |=
8258 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8259 			&vrr_params.adjust,
8260 			sizeof(vrr_params.adjust)) != 0);
8261 
8262 	new_crtc_state->freesync_vrr_info_changed |=
8263 		(memcmp(&new_crtc_state->vrr_infopacket,
8264 			&vrr_infopacket,
8265 			sizeof(vrr_infopacket)) != 0);
8266 
8267 	acrtc->dm_irq_params.vrr_params = vrr_params;
8268 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8269 
8270 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8271 	new_stream->vrr_infopacket = vrr_infopacket;
8272 
8273 	if (new_crtc_state->freesync_vrr_info_changed)
8274 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8275 			      new_crtc_state->base.crtc->base.id,
8276 			      (int)new_crtc_state->base.vrr_enabled,
8277 			      (int)vrr_params.state);
8278 
8279 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8280 }
8281 
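/*
 * Recompute the VRR parameters from the freesync config and copy them,
 * along with the active plane count, into the CRTC's IRQ parameters for
 * use by the DM interrupt handlers.
 */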
8282 static void update_stream_irq_parameters(
8283 	struct amdgpu_display_manager *dm,
8284 	struct dm_crtc_state *new_crtc_state)
8285 {
8286 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8287 	struct mod_vrr_params vrr_params;
8288 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8289 	struct amdgpu_device *adev = dm->adev;
8290 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8291 	unsigned long flags;
8292 
8293 	if (!new_stream)
8294 		return;
8295 
8296 	/*
8297 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8298 	 * For now it's sufficient to just guard against these conditions.
8299 	 */
8300 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8301 		return;
8302 
8303 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8304 	vrr_params = acrtc->dm_irq_params.vrr_params;
8305 
8306 	if (new_crtc_state->vrr_supported &&
8307 	    config.min_refresh_in_uhz &&
8308 	    config.max_refresh_in_uhz) {
8309 		/*
8310 		 * if freesync compatible mode was set, config.state will be set
8311 		 * in atomic check
8312 		 */
8313 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8314 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8315 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8316 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8317 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8318 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8319 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8320 		} else {
8321 			config.state = new_crtc_state->base.vrr_enabled ?
8322 						     VRR_STATE_ACTIVE_VARIABLE :
8323 						     VRR_STATE_INACTIVE;
8324 		}
8325 	} else {
8326 		config.state = VRR_STATE_UNSUPPORTED;
8327 	}
8328 
8329 	mod_freesync_build_vrr_params(dm->freesync_module,
8330 				      new_stream,
8331 				      &config, &vrr_params);
8332 
8333 	new_crtc_state->freesync_timing_changed |=
8334 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8335 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8336 
8337 	new_crtc_state->freesync_config = config;
8338 	/* Copy state for access from DM IRQ handler */
8339 	acrtc->dm_irq_params.freesync_config = config;
8340 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8341 	acrtc->dm_irq_params.vrr_params = vrr_params;
8342 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8343 }
8344 
8345 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8346 					    struct dm_crtc_state *new_state)
8347 {
8348 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8349 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8350 
8351 	if (!old_vrr_active && new_vrr_active) {
8352 		/* Transition VRR inactive -> active:
8353 		 * While VRR is active, we must not disable vblank irq, as a
8354 		 * reenable after disable would compute bogus vblank/pflip
8355 		 * timestamps if it likely happened inside display front-porch.
8356 		 *
8357 		 * We also need vupdate irq for the actual core vblank handling
8358 		 * at end of vblank.
8359 		 */
8360 		dm_set_vupdate_irq(new_state->base.crtc, true);
8361 		drm_crtc_vblank_get(new_state->base.crtc);
8362 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8363 				 __func__, new_state->base.crtc->base.id);
8364 	} else if (old_vrr_active && !new_vrr_active) {
8365 		/* Transition VRR active -> inactive:
8366 		 * Allow vblank irq disable again for fixed refresh rate.
8367 		 */
8368 		dm_set_vupdate_irq(new_state->base.crtc, false);
8369 		drm_crtc_vblank_put(new_state->base.crtc);
8370 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8371 				 __func__, new_state->base.crtc->base.id);
8372 	}
8373 }
8374 
8375 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8376 {
8377 	struct drm_plane *plane;
8378 	struct drm_plane_state *old_plane_state;
8379 	int i;
8380 
8381 	/*
8382 	 * TODO: Make this per-stream so we don't issue redundant updates for
8383 	 * commits with multiple streams.
8384 	 */
8385 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8386 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8387 			handle_cursor_update(plane, old_plane_state);
8388 }
8389 
8390 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8391 				    struct dc_state *dc_state,
8392 				    struct drm_device *dev,
8393 				    struct amdgpu_display_manager *dm,
8394 				    struct drm_crtc *pcrtc,
8395 				    bool wait_for_vblank)
8396 {
8397 	uint32_t i;
8398 	uint64_t timestamp_ns;
8399 	struct drm_plane *plane;
8400 	struct drm_plane_state *old_plane_state, *new_plane_state;
8401 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8402 	struct drm_crtc_state *new_pcrtc_state =
8403 			drm_atomic_get_new_crtc_state(state, pcrtc);
8404 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8405 	struct dm_crtc_state *dm_old_crtc_state =
8406 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8407 	int planes_count = 0, vpos, hpos;
8408 	long r;
8409 	unsigned long flags;
8410 	struct amdgpu_bo *abo;
8411 	uint32_t target_vblank, last_flip_vblank;
8412 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8413 	bool pflip_present = false;
8414 	struct {
8415 		struct dc_surface_update surface_updates[MAX_SURFACES];
8416 		struct dc_plane_info plane_infos[MAX_SURFACES];
8417 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8418 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8419 		struct dc_stream_update stream_update;
8420 	} *bundle;
8421 
8422 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8423 
8424 	if (!bundle) {
8425 		dm_error("Failed to allocate update bundle\n");
8426 		goto cleanup;
8427 	}
8428 
8429 	/*
8430 	 * Disable the cursor first if we're disabling all the planes.
8431 	 * It'll remain on the screen after the planes are re-enabled
8432 	 * if we don't.
8433 	 */
8434 	if (acrtc_state->active_planes == 0)
8435 		amdgpu_dm_commit_cursors(state);
8436 
8437 	/* update planes when needed */
8438 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8439 		struct drm_crtc *crtc = new_plane_state->crtc;
8440 		struct drm_crtc_state *new_crtc_state;
8441 		struct drm_framebuffer *fb = new_plane_state->fb;
8442 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8443 		bool plane_needs_flip;
8444 		struct dc_plane_state *dc_plane;
8445 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8446 
8447 		/* Cursor plane is handled after stream updates */
8448 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8449 			continue;
8450 
8451 		if (!fb || !crtc || pcrtc != crtc)
8452 			continue;
8453 
8454 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8455 		if (!new_crtc_state->active)
8456 			continue;
8457 
8458 		dc_plane = dm_new_plane_state->dc_state;
8459 
8460 		bundle->surface_updates[planes_count].surface = dc_plane;
8461 		if (new_pcrtc_state->color_mgmt_changed) {
8462 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8463 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8464 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8465 		}
8466 
8467 		fill_dc_scaling_info(new_plane_state,
8468 				     &bundle->scaling_infos[planes_count]);
8469 
8470 		bundle->surface_updates[planes_count].scaling_info =
8471 			&bundle->scaling_infos[planes_count];
8472 
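		/*
		 * A page flip is only needed when the plane has a framebuffer
		 * both before and after this commit; a plane being enabled or
		 * disabled is handled as a plain surface update below, with
		 * no fence wait or flip address programming.
		 */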
8473 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8474 
8475 		pflip_present = pflip_present || plane_needs_flip;
8476 
8477 		if (!plane_needs_flip) {
8478 			planes_count += 1;
8479 			continue;
8480 		}
8481 
8482 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8483 
8484 		/*
8485 		 * Wait for all fences on this FB. Do a limited wait to avoid
8486 		 * a deadlock during GPU reset, when this fence will not signal
8487 		 * but we still hold the reservation lock for the BO.
8488 		 */
8489 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8490 					  msecs_to_jiffies(5000));
8491 		if (unlikely(r <= 0))
8492 			DRM_ERROR("Waiting for fences timed out!");
8493 
8494 		fill_dc_plane_info_and_addr(
8495 			dm->adev, new_plane_state,
8496 			afb->tiling_flags,
8497 			&bundle->plane_infos[planes_count],
8498 			&bundle->flip_addrs[planes_count].address,
8499 			afb->tmz_surface, false);
8500 
8501 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8502 				 new_plane_state->plane->index,
8503 				 bundle->plane_infos[planes_count].dcc.enable);
8504 
8505 		bundle->surface_updates[planes_count].plane_info =
8506 			&bundle->plane_infos[planes_count];
8507 
8508 		/*
8509 		 * Only allow immediate flips for fast updates that don't
8510 		 * change FB pitch, DCC state, rotation or mirroing.
8511 		 * change FB pitch, DCC state, rotation or mirroring.
8512 		bundle->flip_addrs[planes_count].flip_immediate =
8513 			crtc->state->async_flip &&
8514 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8515 
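		/*
		 * Record the flip submission time in microseconds; the
		 * freesync state update below uses it for VRR bookkeeping.
		 */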
8516 		timestamp_ns = ktime_get_ns();
8517 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8518 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8519 		bundle->surface_updates[planes_count].surface = dc_plane;
8520 
8521 		if (!bundle->surface_updates[planes_count].surface) {
8522 			DRM_ERROR("No surface for CRTC: id=%d\n",
8523 					acrtc_attach->crtc_id);
8524 			continue;
8525 		}
8526 
8527 		if (plane == pcrtc->primary)
8528 			update_freesync_state_on_stream(
8529 				dm,
8530 				acrtc_state,
8531 				acrtc_state->stream,
8532 				dc_plane,
8533 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8534 
8535 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8536 				 __func__,
8537 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8538 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8539 
8540 		planes_count += 1;
8541 
8542 	}
8543 
8544 	if (pflip_present) {
8545 		if (!vrr_active) {
8546 			/* Use old throttling in non-vrr fixed refresh rate mode
8547 			 * to keep flip scheduling based on target vblank counts
8548 			 * working in a backwards compatible way, e.g., for
8549 			 * clients using the GLX_OML_sync_control extension or
8550 			 * DRI3/Present extension with defined target_msc.
8551 			 */
8552 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8553 		} else {
8555 			/* For variable refresh rate mode only:
8556 			 * Get vblank of last completed flip to avoid > 1 vrr
8557 			 * flips per video frame by use of throttling, but allow
8558 			 * flip programming anywhere in the possibly large
8559 			 * variable vrr vblank interval for fine-grained flip
8560 			 * timing control and more opportunity to avoid stutter
8561 			 * on late submission of flips.
8562 			 */
8563 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8564 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8565 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8566 		}
8567 
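		/*
		 * wait_for_vblank is a bool, so the target is either the
		 * vblank of the last completed flip (async) or the one
		 * immediately after it.
		 */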
8568 		target_vblank = last_flip_vblank + wait_for_vblank;
8569 
8570 		/*
8571 		 * Wait until we're out of the vertical blank period before the one
8572 		 * targeted by the flip
8573 		 */
8574 		while ((acrtc_attach->enabled &&
8575 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8576 							    0, &vpos, &hpos, NULL,
8577 							    NULL, &pcrtc->hwmode)
8578 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8579 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8580 			(int)(target_vblank -
8581 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8582 			usleep_range(1000, 1100);
8583 		}
8584 
8585 		/**
8586 		 * Prepare the flip event for the pageflip interrupt to handle.
8587 		 *
8588 		 * This only works in the case where we've already turned on the
8589 		 * appropriate hardware blocks (e.g. HUBP), so in the transition case
8590 		 * from 0 -> n planes we have to skip a hardware generated event
8591 		 * and rely on sending it from software.
8592 		 */
8593 		if (acrtc_attach->base.state->event &&
8594 		    acrtc_state->active_planes > 0) {
8595 			drm_crtc_vblank_get(pcrtc);
8596 
8597 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8598 
8599 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8600 			prepare_flip_isr(acrtc_attach);
8601 
8602 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8603 		}
8604 
8605 		if (acrtc_state->stream) {
8606 			if (acrtc_state->freesync_vrr_info_changed)
8607 				bundle->stream_update.vrr_infopacket =
8608 					&acrtc_state->stream->vrr_infopacket;
8609 		}
8610 	}
8611 
8612 	/* Update the planes if changed or disable if we don't have any. */
8613 	if ((planes_count || acrtc_state->active_planes == 0) &&
8614 		acrtc_state->stream) {
8615 		bundle->stream_update.stream = acrtc_state->stream;
8616 		if (new_pcrtc_state->mode_changed) {
8617 			bundle->stream_update.src = acrtc_state->stream->src;
8618 			bundle->stream_update.dst = acrtc_state->stream->dst;
8619 		}
8620 
8621 		if (new_pcrtc_state->color_mgmt_changed) {
8622 			/*
8623 			 * TODO: This isn't fully correct since we've actually
8624 			 * already modified the stream in place.
8625 			 */
8626 			bundle->stream_update.gamut_remap =
8627 				&acrtc_state->stream->gamut_remap_matrix;
8628 			bundle->stream_update.output_csc_transform =
8629 				&acrtc_state->stream->csc_color_matrix;
8630 			bundle->stream_update.out_transfer_func =
8631 				acrtc_state->stream->out_transfer_func;
8632 		}
8633 
8634 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8635 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8636 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8637 
8638 		/*
8639 		 * If FreeSync state on the stream has changed then we need to
8640 		 * re-adjust the min/max bounds now that DC doesn't handle this
8641 		 * as part of commit.
8642 		 */
8643 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8644 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8645 			dc_stream_adjust_vmin_vmax(
8646 				dm->dc, acrtc_state->stream,
8647 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8648 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8649 		}
8650 		mutex_lock(&dm->dc_lock);
8651 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8652 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8653 			amdgpu_dm_psr_disable(acrtc_state->stream);
8654 
8655 		dc_commit_updates_for_stream(dm->dc,
8656 						     bundle->surface_updates,
8657 						     planes_count,
8658 						     acrtc_state->stream,
8659 						     &bundle->stream_update,
8660 						     dc_state);
8661 
8662 		/**
8663 		 * Enable or disable the interrupts on the backend.
8664 		 *
8665 		 * Most pipes are put into power gating when unused.
8666 		 *
8667 		 * When power gating is enabled on a pipe we lose the
8668 		 * interrupt enablement state when power gating is disabled.
8669 		 *
8670 		 * So we need to update the IRQ control state in hardware
8671 		 * whenever the pipe turns on (since it could be previously
8672 		 * power gated) or off (since some pipes can't be power gated
8673 		 * on some ASICs).
8674 		 */
8675 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8676 			dm_update_pflip_irq_state(drm_to_adev(dev),
8677 						  acrtc_attach);
8678 
8679 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8680 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8681 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8682 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8683 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8684 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8685 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8686 			amdgpu_dm_psr_enable(acrtc_state->stream);
8687 		}
8688 
8689 		mutex_unlock(&dm->dc_lock);
8690 	}
8691 
8692 	/*
8693 	 * Update cursor state *after* programming all the planes.
8694 	 * This avoids redundant programming in the case where we're going
8695 	 * to be disabling a single plane - those pipes are being disabled.
8696 	 */
8697 	if (acrtc_state->active_planes)
8698 		amdgpu_dm_commit_cursors(state);
8699 
8700 cleanup:
8701 	kfree(bundle);
8702 }
8703 
8704 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8705 				   struct drm_atomic_state *state)
8706 {
8707 	struct amdgpu_device *adev = drm_to_adev(dev);
8708 	struct amdgpu_dm_connector *aconnector;
8709 	struct drm_connector *connector;
8710 	struct drm_connector_state *old_con_state, *new_con_state;
8711 	struct drm_crtc_state *new_crtc_state;
8712 	struct dm_crtc_state *new_dm_crtc_state;
8713 	const struct dc_stream_status *status;
8714 	int i, inst;
8715 
8716 	/* Notify device removals. */
8717 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8718 		if (old_con_state->crtc != new_con_state->crtc) {
8719 			/* CRTC changes require notification. */
8720 			goto notify;
8721 		}
8722 
8723 		if (!new_con_state->crtc)
8724 			continue;
8725 
8726 		new_crtc_state = drm_atomic_get_new_crtc_state(
8727 			state, new_con_state->crtc);
8728 
8729 		if (!new_crtc_state)
8730 			continue;
8731 
8732 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8733 			continue;
8734 
8735 	notify:
8736 		aconnector = to_amdgpu_dm_connector(connector);
8737 
8738 		mutex_lock(&adev->dm.audio_lock);
8739 		inst = aconnector->audio_inst;
8740 		aconnector->audio_inst = -1;
8741 		mutex_unlock(&adev->dm.audio_lock);
8742 
8743 		amdgpu_dm_audio_eld_notify(adev, inst);
8744 	}
8745 
8746 	/* Notify audio device additions. */
8747 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8748 		if (!new_con_state->crtc)
8749 			continue;
8750 
8751 		new_crtc_state = drm_atomic_get_new_crtc_state(
8752 			state, new_con_state->crtc);
8753 
8754 		if (!new_crtc_state)
8755 			continue;
8756 
8757 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8758 			continue;
8759 
8760 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8761 		if (!new_dm_crtc_state->stream)
8762 			continue;
8763 
8764 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8765 		if (!status)
8766 			continue;
8767 
8768 		aconnector = to_amdgpu_dm_connector(connector);
8769 
8770 		mutex_lock(&adev->dm.audio_lock);
8771 		inst = status->audio_inst;
8772 		aconnector->audio_inst = inst;
8773 		mutex_unlock(&adev->dm.audio_lock);
8774 
8775 		amdgpu_dm_audio_eld_notify(adev, inst);
8776 	}
8777 }
8778 
8779 /*
8780  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8781  * @crtc_state: the DRM CRTC state
8782  * @stream_state: the DC stream state.
8783  *
8784  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8785  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8786  */
8787 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8788 						struct dc_stream_state *stream_state)
8789 {
8790 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8791 }
8792 
8793 /**
8794  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8795  * @state: The atomic state to commit
8796  *
8797  * This will tell DC to commit the constructed DC state from atomic_check,
8798  * programming the hardware. Any failure here implies a hardware failure, since
8799  * atomic check should have filtered anything non-kosher.
8800  */
8801 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8802 {
8803 	struct drm_device *dev = state->dev;
8804 	struct amdgpu_device *adev = drm_to_adev(dev);
8805 	struct amdgpu_display_manager *dm = &adev->dm;
8806 	struct dm_atomic_state *dm_state;
8807 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8808 	uint32_t i, j;
8809 	struct drm_crtc *crtc;
8810 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8811 	unsigned long flags;
8812 	bool wait_for_vblank = true;
8813 	struct drm_connector *connector;
8814 	struct drm_connector_state *old_con_state, *new_con_state;
8815 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8816 	int crtc_disable_count = 0;
8817 	bool mode_set_reset_required = false;
8818 
8819 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8820 
8821 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8822 
8823 	dm_state = dm_atomic_get_new_state(state);
8824 	if (dm_state && dm_state->context) {
8825 		dc_state = dm_state->context;
8826 	} else {
8827 		/* No state changes, retain current state. */
8828 		dc_state_temp = dc_create_state(dm->dc);
8829 		ASSERT(dc_state_temp);
8830 		dc_state = dc_state_temp;
8831 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8832 	}
8833 
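	/*
	 * Disable interrupts and release the old stream for every CRTC that
	 * is being turned off or is about to go through a full modeset, so
	 * no vblank/pflip IRQs fire while the hardware is reprogrammed below.
	 */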
8834 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8835 				       new_crtc_state, i) {
8836 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8837 
8838 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8839 
8840 		if (old_crtc_state->active &&
8841 		    (!new_crtc_state->active ||
8842 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8843 			manage_dm_interrupts(adev, acrtc, false);
8844 			dc_stream_release(dm_old_crtc_state->stream);
8845 		}
8846 	}
8847 
8848 	drm_atomic_helper_calc_timestamping_constants(state);
8849 
8850 	/* update changed items */
8851 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8852 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8853 
8854 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8855 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8856 
8857 		DRM_DEBUG_ATOMIC(
8858 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8859 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8860 			"connectors_changed:%d\n",
8861 			acrtc->crtc_id,
8862 			new_crtc_state->enable,
8863 			new_crtc_state->active,
8864 			new_crtc_state->planes_changed,
8865 			new_crtc_state->mode_changed,
8866 			new_crtc_state->active_changed,
8867 			new_crtc_state->connectors_changed);
8868 
8869 		/* Disable cursor if disabling crtc */
8870 		if (old_crtc_state->active && !new_crtc_state->active) {
8871 			struct dc_cursor_position position;
8872 
8873 			memset(&position, 0, sizeof(position));
8874 			mutex_lock(&dm->dc_lock);
8875 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8876 			mutex_unlock(&dm->dc_lock);
8877 		}
8878 
8879 		/* Copy all transient state flags into dc state */
8880 		if (dm_new_crtc_state->stream) {
8881 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8882 							    dm_new_crtc_state->stream);
8883 		}
8884 
8885 		/* handles headless hotplug case, updating new_state and
8886 		 * aconnector as needed
8887 		 */
8888 
8889 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8890 
8891 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8892 
8893 			if (!dm_new_crtc_state->stream) {
8894 				/*
8895 				 * This could happen because of issues with
8896 				 * userspace notification delivery.
8897 				 * In this case userspace tries to set a mode on
8898 				 * a display which is in fact disconnected.
8899 				 * dc_sink is NULL on the aconnector in this case.
8900 				 * We expect a mode reset to come soon.
8901 				 *
8902 				 * This can also happen when an unplug is done
8903 				 * during the resume sequence.
8904 				 *
8905 				 * In either case, we want to pretend we still
8906 				 * have a sink to keep the pipe running so that
8907 				 * hw state stays consistent with the sw state.
8908 				 */
8909 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8910 						__func__, acrtc->base.base.id);
8911 				continue;
8912 			}
8913 
8914 			if (dm_old_crtc_state->stream)
8915 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8916 
8917 			pm_runtime_get_noresume(dev->dev);
8918 
8919 			acrtc->enabled = true;
8920 			acrtc->hw_mode = new_crtc_state->mode;
8921 			crtc->hwmode = new_crtc_state->mode;
8922 			mode_set_reset_required = true;
8923 		} else if (modereset_required(new_crtc_state)) {
8924 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8925 			/* i.e. reset mode */
8926 			if (dm_old_crtc_state->stream)
8927 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8928 
8929 			mode_set_reset_required = true;
8930 		}
8931 	} /* for_each_crtc_in_state() */
8932 
8933 	if (dc_state) {
8934 		/* if there is a mode set or reset, disable eDP PSR */
8935 		if (mode_set_reset_required)
8936 			amdgpu_dm_psr_disable_all(dm);
8937 
8938 		dm_enable_per_frame_crtc_master_sync(dc_state);
8939 		mutex_lock(&dm->dc_lock);
8940 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8941 #if defined(CONFIG_DRM_AMD_DC_DCN)
8942 		/* Allow idle optimization when vblank count is 0 for display off */
8943 		if (dm->active_vblank_irq_count == 0)
8944 			dc_allow_idle_optimizations(dm->dc, true);
8945 #endif
8946 		mutex_unlock(&dm->dc_lock);
8947 	}
8948 
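	/*
	 * Record which OTG instance DC assigned to each enabled stream on the
	 * corresponding amdgpu_crtc, so interrupt handling can map hardware
	 * events back to the right CRTC.
	 */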
8949 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8950 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8951 
8952 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8953 
8954 		if (dm_new_crtc_state->stream != NULL) {
8955 			const struct dc_stream_status *status =
8956 					dc_stream_get_status(dm_new_crtc_state->stream);
8957 
8958 			if (!status)
8959 				status = dc_stream_get_status_from_state(dc_state,
8960 									 dm_new_crtc_state->stream);
8961 			if (!status)
8962 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8963 			else
8964 				acrtc->otg_inst = status->primary_otg_inst;
8965 		}
8966 	}
8967 #ifdef CONFIG_DRM_AMD_DC_HDCP
8968 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8969 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8970 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8971 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8972 
8973 		new_crtc_state = NULL;
8974 
8975 		if (acrtc)
8976 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8977 
8978 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8979 
8980 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8981 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8982 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8983 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8984 			dm_new_con_state->update_hdcp = true;
8985 			continue;
8986 		}
8987 
8988 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8989 			hdcp_update_display(
8990 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8991 				new_con_state->hdcp_content_type,
8992 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8993 	}
8994 #endif
8995 
8996 	/* Handle connector state changes */
8997 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8998 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8999 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9000 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9001 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9002 		struct dc_stream_update stream_update;
9003 		struct dc_info_packet hdr_packet;
9004 		struct dc_stream_status *status = NULL;
9005 		bool abm_changed, hdr_changed, scaling_changed;
9006 
9007 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9008 		memset(&stream_update, 0, sizeof(stream_update));
9009 
9010 		if (acrtc) {
9011 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9012 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9013 		}
9014 
9015 		/* Skip any modesets/resets */
9016 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9017 			continue;
9018 
9019 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9020 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9021 
9022 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9023 							     dm_old_con_state);
9024 
9025 		abm_changed = dm_new_crtc_state->abm_level !=
9026 			      dm_old_crtc_state->abm_level;
9027 
9028 		hdr_changed =
9029 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9030 
9031 		if (!scaling_changed && !abm_changed && !hdr_changed)
9032 			continue;
9033 
9034 		stream_update.stream = dm_new_crtc_state->stream;
9035 		if (scaling_changed) {
9036 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9037 					dm_new_con_state, dm_new_crtc_state->stream);
9038 
9039 			stream_update.src = dm_new_crtc_state->stream->src;
9040 			stream_update.dst = dm_new_crtc_state->stream->dst;
9041 		}
9042 
9043 		if (abm_changed) {
9044 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9045 
9046 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9047 		}
9048 
9049 		if (hdr_changed) {
9050 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9051 			stream_update.hdr_static_metadata = &hdr_packet;
9052 		}
9053 
9054 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9055 
9056 		if (WARN_ON(!status))
9057 			continue;
9058 
9059 		WARN_ON(!status->plane_count);
9060 
9061 		/*
9062 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9063 		 * Here we create an empty update on each plane.
9064 		 * To fix this, DC should permit updating only stream properties.
9065 		 */
9066 		for (j = 0; j < status->plane_count; j++)
9067 			dummy_updates[j].surface = status->plane_states[0];
9068 
9069 
9070 		mutex_lock(&dm->dc_lock);
9071 		dc_commit_updates_for_stream(dm->dc,
9072 						     dummy_updates,
9073 						     status->plane_count,
9074 						     dm_new_crtc_state->stream,
9075 						     &stream_update,
9076 						     dc_state);
9077 		mutex_unlock(&dm->dc_lock);
9078 	}
9079 
9080 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9081 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9082 				      new_crtc_state, i) {
9083 		if (old_crtc_state->active && !new_crtc_state->active)
9084 			crtc_disable_count++;
9085 
9086 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9087 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9088 
9089 		/* For freesync config update on crtc state and params for irq */
9090 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9091 
9092 		/* Handle vrr on->off / off->on transitions */
9093 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9094 						dm_new_crtc_state);
9095 	}
9096 
9097 	/**
9098 	 * Enable interrupts for CRTCs that are newly enabled or went through
9099 	 * a modeset. It was intentionally deferred until after the front end
9100 	 * state was modified to wait until the OTG was on and so the IRQ
9101 	 * handlers didn't access stale or invalid state.
9102 	 */
9103 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9104 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9105 #ifdef CONFIG_DEBUG_FS
9106 		bool configure_crc = false;
9107 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9108 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9109 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9110 #endif
9111 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9112 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9113 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9114 #endif
9115 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9116 
9117 		if (new_crtc_state->active &&
9118 		    (!old_crtc_state->active ||
9119 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9120 			dc_stream_retain(dm_new_crtc_state->stream);
9121 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9122 			manage_dm_interrupts(adev, acrtc, true);
9123 
9124 #ifdef CONFIG_DEBUG_FS
9125 			/**
9126 			 * Frontend may have changed so reapply the CRC capture
9127 			 * settings for the stream.
9128 			 */
9129 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9130 
9131 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9132 				configure_crc = true;
9133 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9134 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9135 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9136 					acrtc->dm_irq_params.crc_window.update_win = true;
9137 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9138 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9139 					crc_rd_wrk->crtc = crtc;
9140 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9141 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9142 				}
9143 #endif
9144 			}
9145 
9146 			if (configure_crc)
9147 				if (amdgpu_dm_crtc_configure_crc_source(
9148 					crtc, dm_new_crtc_state, cur_crc_src))
9149 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9150 #endif
9151 		}
9152 	}
9153 
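	/*
	 * If any CRTC requested an async flip, don't throttle the plane
	 * commits below to the next vblank.
	 */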
9154 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9155 		if (new_crtc_state->async_flip)
9156 			wait_for_vblank = false;
9157 
9158 	/* update planes when needed per crtc */
9159 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9160 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9161 
9162 		if (dm_new_crtc_state->stream)
9163 			amdgpu_dm_commit_planes(state, dc_state, dev,
9164 						dm, crtc, wait_for_vblank);
9165 	}
9166 
9167 	/* Update audio instances for each connector. */
9168 	amdgpu_dm_commit_audio(dev, state);
9169 
9170 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9171 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9172 	/* restore the backlight level */
9173 	if (dm->backlight_dev)
9174 		amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9175 #endif
9176 	/*
9177 	 * send vblank event on all events not handled in flip and
9178 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9179 	 */
9180 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9181 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9182 
9183 		if (new_crtc_state->event)
9184 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9185 
9186 		new_crtc_state->event = NULL;
9187 	}
9188 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9189 
9190 	/* Signal HW programming completion */
9191 	drm_atomic_helper_commit_hw_done(state);
9192 
9193 	if (wait_for_vblank)
9194 		drm_atomic_helper_wait_for_flip_done(dev, state);
9195 
9196 	drm_atomic_helper_cleanup_planes(dev, state);
9197 
9198 	/* return the stolen vga memory back to VRAM */
9199 	if (!adev->mman.keep_stolen_vga_memory)
9200 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9201 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9202 
9203 	/*
9204 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9205 	 * so we can put the GPU into runtime suspend if we're not driving any
9206 	 * displays anymore
9207 	 */
9208 	for (i = 0; i < crtc_disable_count; i++)
9209 		pm_runtime_put_autosuspend(dev->dev);
9210 	pm_runtime_mark_last_busy(dev->dev);
9211 
9212 	if (dc_state_temp)
9213 		dc_release_state(dc_state_temp);
9214 }
9215 
9216 
9217 static int dm_force_atomic_commit(struct drm_connector *connector)
9218 {
9219 	int ret = 0;
9220 	struct drm_device *ddev = connector->dev;
9221 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9222 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9223 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9224 	struct drm_connector_state *conn_state;
9225 	struct drm_crtc_state *crtc_state;
9226 	struct drm_plane_state *plane_state;
9227 
9228 	if (!state)
9229 		return -ENOMEM;
9230 
9231 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9232 
9233 	/* Construct an atomic state to restore previous display setting */
9234 
9235 	/*
9236 	 * Attach connectors to drm_atomic_state
9237 	 */
9238 	conn_state = drm_atomic_get_connector_state(state, connector);
9239 
9240 	ret = PTR_ERR_OR_ZERO(conn_state);
9241 	if (ret)
9242 		goto out;
9243 
9244 	/* Attach crtc to drm_atomic_state*/
9245 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9246 
9247 	ret = PTR_ERR_OR_ZERO(crtc_state);
9248 	if (ret)
9249 		goto out;
9250 
9251 	/* force a restore */
9252 	crtc_state->mode_changed = true;
9253 
9254 	/* Attach plane to drm_atomic_state */
9255 	plane_state = drm_atomic_get_plane_state(state, plane);
9256 
9257 	ret = PTR_ERR_OR_ZERO(plane_state);
9258 	if (ret)
9259 		goto out;
9260 
9261 	/* Call commit internally with the state we just constructed */
9262 	ret = drm_atomic_commit(state);
9263 
9264 out:
9265 	drm_atomic_state_put(state);
9266 	if (ret)
9267 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9268 
9269 	return ret;
9270 }
9271 
9272 /*
9273  * This function handles all cases where a set mode does not come upon hotplug.
9274  * This includes when a display is unplugged and then plugged back into the
9275  * same port, and when running without usermode desktop manager support.
9276  */
9277 void dm_restore_drm_connector_state(struct drm_device *dev,
9278 				    struct drm_connector *connector)
9279 {
9280 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9281 	struct amdgpu_crtc *disconnected_acrtc;
9282 	struct dm_crtc_state *acrtc_state;
9283 
9284 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9285 		return;
9286 
9287 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9288 	if (!disconnected_acrtc)
9289 		return;
9290 
9291 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9292 	if (!acrtc_state->stream)
9293 		return;
9294 
9295 	/*
9296 	 * If the previous sink is not released and is different from the
9297 	 * current one, we deduce we are in a state where we cannot rely on a
9298 	 * usermode call to turn on the display, so we do it here.
9299 	 */
9300 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9301 		dm_force_atomic_commit(&aconnector->base);
9302 }
9303 
9304 /*
9305  * Grabs all modesetting locks to serialize against any blocking commits,
9306  * and waits for completion of all non-blocking commits.
9307  */
9308 static int do_aquire_global_lock(struct drm_device *dev,
9309 				 struct drm_atomic_state *state)
9310 {
9311 	struct drm_crtc *crtc;
9312 	struct drm_crtc_commit *commit;
9313 	long ret;
9314 
9315 	/*
9316 	 * Adding all modeset locks to acquire_ctx will
9317 	 * ensure that when the framework releases it, the
9318 	 * extra locks we are taking here will get released too.
9319 	 */
9320 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9321 	if (ret)
9322 		return ret;
9323 
9324 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9325 		spin_lock(&crtc->commit_lock);
9326 		commit = list_first_entry_or_null(&crtc->commit_list,
9327 				struct drm_crtc_commit, commit_entry);
9328 		if (commit)
9329 			drm_crtc_commit_get(commit);
9330 		spin_unlock(&crtc->commit_lock);
9331 
9332 		if (!commit)
9333 			continue;
9334 
9335 		/*
9336 		 * Make sure all pending HW programming completed and
9337 		 * page flips done
9338 		 */
9339 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9340 
9341 		if (ret > 0)
9342 			ret = wait_for_completion_interruptible_timeout(
9343 					&commit->flip_done, 10*HZ);
9344 
9345 		if (ret == 0)
9346 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9347 				  "timed out\n", crtc->base.id, crtc->name);
9348 
9349 		drm_crtc_commit_put(commit);
9350 	}
9351 
9352 	return ret < 0 ? ret : 0;
9353 }
9354 
9355 static void get_freesync_config_for_crtc(
9356 	struct dm_crtc_state *new_crtc_state,
9357 	struct dm_connector_state *new_con_state)
9358 {
9359 	struct mod_freesync_config config = {0};
9360 	struct amdgpu_dm_connector *aconnector =
9361 			to_amdgpu_dm_connector(new_con_state->base.connector);
9362 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9363 	int vrefresh = drm_mode_vrefresh(mode);
9364 	bool fs_vid_mode = false;
9365 
9366 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9367 					vrefresh >= aconnector->min_vfreq &&
9368 					vrefresh <= aconnector->max_vfreq;
9369 
9370 	if (new_crtc_state->vrr_supported) {
9371 		new_crtc_state->stream->ignore_msa_timing_param = true;
9372 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9373 
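		/* DC expects the refresh rate limits in micro-Hz. */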
9374 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9375 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9376 		config.vsif_supported = true;
9377 		config.btr = true;
9378 
9379 		if (fs_vid_mode) {
9380 			config.state = VRR_STATE_ACTIVE_FIXED;
9381 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9382 			goto out;
9383 		} else if (new_crtc_state->base.vrr_enabled) {
9384 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9385 		} else {
9386 			config.state = VRR_STATE_INACTIVE;
9387 		}
9388 	}
9389 out:
9390 	new_crtc_state->freesync_config = config;
9391 }
9392 
9393 static void reset_freesync_config_for_crtc(
9394 	struct dm_crtc_state *new_crtc_state)
9395 {
9396 	new_crtc_state->vrr_supported = false;
9397 
9398 	memset(&new_crtc_state->vrr_infopacket, 0,
9399 	       sizeof(new_crtc_state->vrr_infopacket));
9400 }
9401 
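/*
 * Returns true when the two modes differ only in vertical blanking
 * placement (vtotal and vsync position) while the sync pulse width and all
 * horizontal/active timings are unchanged, i.e. essentially a front porch
 * change, which freesync video modes can absorb without a full modeset.
 */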
9402 static bool
9403 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9404 				 struct drm_crtc_state *new_crtc_state)
9405 {
9406 	struct drm_display_mode old_mode, new_mode;
9407 
9408 	if (!old_crtc_state || !new_crtc_state)
9409 		return false;
9410 
9411 	old_mode = old_crtc_state->mode;
9412 	new_mode = new_crtc_state->mode;
9413 
9414 	if (old_mode.clock       == new_mode.clock &&
9415 	    old_mode.hdisplay    == new_mode.hdisplay &&
9416 	    old_mode.vdisplay    == new_mode.vdisplay &&
9417 	    old_mode.htotal      == new_mode.htotal &&
9418 	    old_mode.vtotal      != new_mode.vtotal &&
9419 	    old_mode.hsync_start == new_mode.hsync_start &&
9420 	    old_mode.vsync_start != new_mode.vsync_start &&
9421 	    old_mode.hsync_end   == new_mode.hsync_end &&
9422 	    old_mode.vsync_end   != new_mode.vsync_end &&
9423 	    old_mode.hskew       == new_mode.hskew &&
9424 	    old_mode.vscan       == new_mode.vscan &&
9425 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9426 	    (new_mode.vsync_end - new_mode.vsync_start))
9427 		return true;
9428 
9429 	return false;
9430 }
9431 
9432 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9433 	uint64_t num, den, res;
9434 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9435 
9436 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9437 
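	/*
	 * Fixed refresh rate = pixel clock / (htotal * vtotal).
	 * mode.clock is in kHz, so scale by 1000 to get Hz and by another
	 * 1000000 to express the result in micro-Hz.
	 */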
9438 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9439 	den = (unsigned long long)new_crtc_state->mode.htotal *
9440 	      (unsigned long long)new_crtc_state->mode.vtotal;
9441 
9442 	res = div_u64(num, den);
9443 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9444 }
9445 
9446 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9447 				struct drm_atomic_state *state,
9448 				struct drm_crtc *crtc,
9449 				struct drm_crtc_state *old_crtc_state,
9450 				struct drm_crtc_state *new_crtc_state,
9451 				bool enable,
9452 				bool *lock_and_validation_needed)
9453 {
9454 	struct dm_atomic_state *dm_state = NULL;
9455 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9456 	struct dc_stream_state *new_stream;
9457 	int ret = 0;
9458 
9459 	/*
9460 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9461 	 * update changed items
9462 	 */
9463 	struct amdgpu_crtc *acrtc = NULL;
9464 	struct amdgpu_dm_connector *aconnector = NULL;
9465 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9466 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9467 
9468 	new_stream = NULL;
9469 
9470 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9471 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9472 	acrtc = to_amdgpu_crtc(crtc);
9473 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9474 
9475 	/* TODO This hack should go away */
9476 	if (aconnector && enable) {
9477 		/* Make sure fake sink is created in plug-in scenario */
9478 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9479 							    &aconnector->base);
9480 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9481 							    &aconnector->base);
9482 
9483 		if (IS_ERR(drm_new_conn_state)) {
9484 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9485 			goto fail;
9486 		}
9487 
9488 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9489 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9490 
9491 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9492 			goto skip_modeset;
9493 
9494 		new_stream = create_validate_stream_for_sink(aconnector,
9495 							     &new_crtc_state->mode,
9496 							     dm_new_conn_state,
9497 							     dm_old_crtc_state->stream);
9498 
9499 		/*
9500 		 * We can have no stream on ACTION_SET if a display
9501 		 * was disconnected during S3; in this case it is not an
9502 		 * error. The OS will be updated after detection and
9503 		 * will do the right thing on the next atomic commit.
9504 		 */
9505 
9506 		if (!new_stream) {
9507 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9508 					__func__, acrtc->base.base.id);
9509 			ret = -ENOMEM;
9510 			goto fail;
9511 		}
9512 
9513 		/*
9514 		 * TODO: Check VSDB bits to decide whether this should
9515 		 * be enabled or not.
9516 		 */
9517 		new_stream->triggered_crtc_reset.enabled =
9518 			dm->force_timing_sync;
9519 
9520 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9521 
9522 		ret = fill_hdr_info_packet(drm_new_conn_state,
9523 					   &new_stream->hdr_static_metadata);
9524 		if (ret)
9525 			goto fail;
9526 
9527 		/*
9528 		 * If we already removed the old stream from the context
9529 		 * (and set the new stream to NULL) then we can't reuse
9530 		 * the old stream even if the stream and scaling are unchanged.
9531 		 * We'll hit the BUG_ON and black screen.
9532 		 *
9533 		 * TODO: Refactor this function to allow this check to work
9534 		 * in all conditions.
9535 		 */
9536 		if (amdgpu_freesync_vid_mode &&
9537 		    dm_new_crtc_state->stream &&
9538 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9539 			goto skip_modeset;
9540 
9541 		if (dm_new_crtc_state->stream &&
9542 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9543 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9544 			new_crtc_state->mode_changed = false;
9545 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9546 					 new_crtc_state->mode_changed);
9547 		}
9548 	}
9549 
9550 	/* mode_changed flag may get updated above, need to check again */
9551 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9552 		goto skip_modeset;
9553 
9554 	DRM_DEBUG_ATOMIC(
9555 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9556 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9557 		"connectors_changed:%d\n",
9558 		acrtc->crtc_id,
9559 		new_crtc_state->enable,
9560 		new_crtc_state->active,
9561 		new_crtc_state->planes_changed,
9562 		new_crtc_state->mode_changed,
9563 		new_crtc_state->active_changed,
9564 		new_crtc_state->connectors_changed);
9565 
9566 	/* Remove stream for any changed/disabled CRTC */
9567 	if (!enable) {
9568 
9569 		if (!dm_old_crtc_state->stream)
9570 			goto skip_modeset;
9571 
9572 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9573 		    is_timing_unchanged_for_freesync(new_crtc_state,
9574 						     old_crtc_state)) {
9575 			new_crtc_state->mode_changed = false;
9576 			DRM_DEBUG_DRIVER(
9577 				"Mode change not required for front porch change, "
9578 				"setting mode_changed to %d",
9579 				new_crtc_state->mode_changed);
9580 
9581 			set_freesync_fixed_config(dm_new_crtc_state);
9582 
9583 			goto skip_modeset;
9584 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9585 			   is_freesync_video_mode(&new_crtc_state->mode,
9586 						  aconnector)) {
9587 			set_freesync_fixed_config(dm_new_crtc_state);
9588 		}
9589 
9590 		ret = dm_atomic_get_state(state, &dm_state);
9591 		if (ret)
9592 			goto fail;
9593 
9594 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9595 				crtc->base.id);
9596 
9597 		/* i.e. reset mode */
9598 		if (dc_remove_stream_from_ctx(
9599 				dm->dc,
9600 				dm_state->context,
9601 				dm_old_crtc_state->stream) != DC_OK) {
9602 			ret = -EINVAL;
9603 			goto fail;
9604 		}
9605 
9606 		dc_stream_release(dm_old_crtc_state->stream);
9607 		dm_new_crtc_state->stream = NULL;
9608 
9609 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9610 
9611 		*lock_and_validation_needed = true;
9612 
9613 	} else {/* Add stream for any updated/enabled CRTC */
9614 		/*
9615 		 * Quick fix to prevent a NULL pointer on new_stream when added
9616 		 * MST connectors are not found in the existing crtc_state in chained mode.
9617 		 * TODO: need to dig out the root cause of that
9618 		 */
9619 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9620 			goto skip_modeset;
9621 
9622 		if (modereset_required(new_crtc_state))
9623 			goto skip_modeset;
9624 
9625 		if (modeset_required(new_crtc_state, new_stream,
9626 				     dm_old_crtc_state->stream)) {
9627 
9628 			WARN_ON(dm_new_crtc_state->stream);
9629 
9630 			ret = dm_atomic_get_state(state, &dm_state);
9631 			if (ret)
9632 				goto fail;
9633 
9634 			dm_new_crtc_state->stream = new_stream;
9635 
9636 			dc_stream_retain(new_stream);
9637 
9638 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9639 					 crtc->base.id);
9640 
9641 			if (dc_add_stream_to_ctx(
9642 					dm->dc,
9643 					dm_state->context,
9644 					dm_new_crtc_state->stream) != DC_OK) {
9645 				ret = -EINVAL;
9646 				goto fail;
9647 			}
9648 
9649 			*lock_and_validation_needed = true;
9650 		}
9651 	}
9652 
9653 skip_modeset:
9654 	/* Release extra reference */
9655 	if (new_stream)
9656 		dc_stream_release(new_stream);
9657 
9658 	/*
9659 	 * We want to do dc stream updates that do not require a
9660 	 * full modeset below.
9661 	 */
9662 	if (!(enable && aconnector && new_crtc_state->active))
9663 		return 0;
9664 	/*
9665 	 * Given above conditions, the dc state cannot be NULL because:
9666 	 * 1. We're in the process of enabling CRTCs (just been added
9667 	 *    to the dc context, or already is on the context)
9668 	 *    to the dc context, or is already on the context)
9669 	 * 3. Is currently active and enabled.
9670 	 * => The dc stream state currently exists.
9671 	 */
9672 	BUG_ON(dm_new_crtc_state->stream == NULL);
9673 
9674 	/* Scaling or underscan settings */
9675 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9676 				drm_atomic_crtc_needs_modeset(new_crtc_state))
9677 		update_stream_scaling_settings(
9678 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9679 
9680 	/* ABM settings */
9681 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9682 
9683 	/*
9684 	 * Color management settings. We also update color properties
9685 	 * when a modeset is needed, to ensure it gets reprogrammed.
9686 	 */
9687 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9688 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9689 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9690 		if (ret)
9691 			goto fail;
9692 	}
9693 
9694 	/* Update Freesync settings. */
9695 	get_freesync_config_for_crtc(dm_new_crtc_state,
9696 				     dm_new_conn_state);
9697 
9698 	return ret;
9699 
9700 fail:
9701 	if (new_stream)
9702 		dc_stream_release(new_stream);
9703 	return ret;
9704 }
9705 
9706 static bool should_reset_plane(struct drm_atomic_state *state,
9707 			       struct drm_plane *plane,
9708 			       struct drm_plane_state *old_plane_state,
9709 			       struct drm_plane_state *new_plane_state)
9710 {
9711 	struct drm_plane *other;
9712 	struct drm_plane_state *old_other_state, *new_other_state;
9713 	struct drm_crtc_state *new_crtc_state;
9714 	int i;
9715 
9716 	/*
9717 	 * TODO: Remove this hack once the checks below are sufficient
9718 	 * to determine when we need to reset all the planes on
9719 	 * the stream.
9720 	 */
9721 	if (state->allow_modeset)
9722 		return true;
9723 
9724 	/* Exit early if we know that we're adding or removing the plane. */
9725 	if (old_plane_state->crtc != new_plane_state->crtc)
9726 		return true;
9727 
9728 	/* old crtc == new_crtc == NULL, plane not in context. */
9729 	if (!new_plane_state->crtc)
9730 		return false;
9731 
9732 	new_crtc_state =
9733 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9734 
9735 	if (!new_crtc_state)
9736 		return true;
9737 
9738 	/* CRTC Degamma changes currently require us to recreate planes. */
9739 	if (new_crtc_state->color_mgmt_changed)
9740 		return true;
9741 
9742 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9743 		return true;
9744 
9745 	/*
9746 	 * If there are any new primary or overlay planes being added or
9747 	 * removed then the z-order can potentially change. To ensure
9748 	 * correct z-order and pipe acquisition the current DC architecture
9749 	 * requires us to remove and recreate all existing planes.
9750 	 *
9751 	 * TODO: Come up with a more elegant solution for this.
9752 	 */
9753 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9754 		struct amdgpu_framebuffer *old_afb, *new_afb;
9755 		if (other->type == DRM_PLANE_TYPE_CURSOR)
9756 			continue;
9757 
9758 		if (old_other_state->crtc != new_plane_state->crtc &&
9759 		    new_other_state->crtc != new_plane_state->crtc)
9760 			continue;
9761 
9762 		if (old_other_state->crtc != new_other_state->crtc)
9763 			return true;
9764 
9765 		/* Src/dst size and scaling updates. */
9766 		if (old_other_state->src_w != new_other_state->src_w ||
9767 		    old_other_state->src_h != new_other_state->src_h ||
9768 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9769 		    old_other_state->crtc_h != new_other_state->crtc_h)
9770 			return true;
9771 
9772 		/* Rotation / mirroring updates. */
9773 		if (old_other_state->rotation != new_other_state->rotation)
9774 			return true;
9775 
9776 		/* Blending updates. */
9777 		if (old_other_state->pixel_blend_mode !=
9778 		    new_other_state->pixel_blend_mode)
9779 			return true;
9780 
9781 		/* Alpha updates. */
9782 		if (old_other_state->alpha != new_other_state->alpha)
9783 			return true;
9784 
9785 		/* Colorspace changes. */
9786 		if (old_other_state->color_range != new_other_state->color_range ||
9787 		    old_other_state->color_encoding != new_other_state->color_encoding)
9788 			return true;
9789 
9790 		/* Framebuffer checks fall at the end. */
9791 		if (!old_other_state->fb || !new_other_state->fb)
9792 			continue;
9793 
9794 		/* Pixel format changes can require bandwidth updates. */
9795 		if (old_other_state->fb->format != new_other_state->fb->format)
9796 			return true;
9797 
9798 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9799 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9800 
9801 		/* Tiling and DCC changes also require bandwidth updates. */
9802 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9803 		    old_afb->base.modifier != new_afb->base.modifier)
9804 			return true;
9805 	}
9806 
9807 	return false;
9808 }
9809 
9810 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9811 			      struct drm_plane_state *new_plane_state,
9812 			      struct drm_framebuffer *fb)
9813 {
9814 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9815 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9816 	unsigned int pitch;
9817 	bool linear;
9818 
9819 	if (fb->width > new_acrtc->max_cursor_width ||
9820 	    fb->height > new_acrtc->max_cursor_height) {
9821 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9822 				 new_plane_state->fb->width,
9823 				 new_plane_state->fb->height);
9824 		return -EINVAL;
9825 	}
9826 	if (new_plane_state->src_w != fb->width << 16 ||
9827 	    new_plane_state->src_h != fb->height << 16) {
9828 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9829 		return -EINVAL;
9830 	}
9831 
9832 	/* Pitch in pixels */
9833 	pitch = fb->pitches[0] / fb->format->cpp[0];
9834 
9835 	if (fb->width != pitch) {
9836 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9837 				 fb->width, pitch);
9838 		return -EINVAL;
9839 	}
9840 
9841 	switch (pitch) {
9842 	case 64:
9843 	case 128:
9844 	case 256:
9845 		/* FB pitch is supported by cursor plane */
9846 		break;
9847 	default:
9848 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9849 		return -EINVAL;
9850 	}
9851 
9852 	/* Core DRM takes care of checking FB modifiers, so we only need to
9853 	 * check tiling flags when the FB doesn't have a modifier. */
9854 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9855 		if (adev->family < AMDGPU_FAMILY_AI) {
9856 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9857 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9858 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9859 		} else {
9860 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9861 		}
9862 		if (!linear) {
9863 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9864 			return -EINVAL;
9865 		}
9866 	}
9867 
9868 	return 0;
9869 }
9870 
9871 static int dm_update_plane_state(struct dc *dc,
9872 				 struct drm_atomic_state *state,
9873 				 struct drm_plane *plane,
9874 				 struct drm_plane_state *old_plane_state,
9875 				 struct drm_plane_state *new_plane_state,
9876 				 bool enable,
9877 				 bool *lock_and_validation_needed)
9878 {
9879 
9880 	struct dm_atomic_state *dm_state = NULL;
9881 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9882 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9883 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9884 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9885 	struct amdgpu_crtc *new_acrtc;
9886 	bool needs_reset;
9887 	int ret = 0;
9888 
9889 
9890 	new_plane_crtc = new_plane_state->crtc;
9891 	old_plane_crtc = old_plane_state->crtc;
9892 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9893 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9894 
9895 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9896 		if (!enable || !new_plane_crtc ||
9897 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9898 			return 0;
9899 
9900 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9901 
9902 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9903 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9904 			return -EINVAL;
9905 		}
9906 
9907 		if (new_plane_state->fb) {
9908 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9909 						 new_plane_state->fb);
9910 			if (ret)
9911 				return ret;
9912 		}
9913 
9914 		return 0;
9915 	}
9916 
9917 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9918 					 new_plane_state);
9919 
9920 	/* Remove any changed/removed planes */
9921 	if (!enable) {
9922 		if (!needs_reset)
9923 			return 0;
9924 
9925 		if (!old_plane_crtc)
9926 			return 0;
9927 
9928 		old_crtc_state = drm_atomic_get_old_crtc_state(
9929 				state, old_plane_crtc);
9930 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9931 
9932 		if (!dm_old_crtc_state->stream)
9933 			return 0;
9934 
9935 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9936 				plane->base.id, old_plane_crtc->base.id);
9937 
9938 		ret = dm_atomic_get_state(state, &dm_state);
9939 		if (ret)
9940 			return ret;
9941 
9942 		if (!dc_remove_plane_from_context(
9943 				dc,
9944 				dm_old_crtc_state->stream,
9945 				dm_old_plane_state->dc_state,
9946 				dm_state->context)) {
9947 
9948 			return -EINVAL;
9949 		}
9950 
9951 
9952 		dc_plane_state_release(dm_old_plane_state->dc_state);
9953 		dm_new_plane_state->dc_state = NULL;
9954 
9955 		*lock_and_validation_needed = true;
9956 
9957 	} else { /* Add new planes */
9958 		struct dc_plane_state *dc_new_plane_state;
9959 
9960 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9961 			return 0;
9962 
9963 		if (!new_plane_crtc)
9964 			return 0;
9965 
9966 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9967 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9968 
9969 		if (!dm_new_crtc_state->stream)
9970 			return 0;
9971 
9972 		if (!needs_reset)
9973 			return 0;
9974 
9975 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9976 		if (ret)
9977 			return ret;
9978 
9979 		WARN_ON(dm_new_plane_state->dc_state);
9980 
9981 		dc_new_plane_state = dc_create_plane_state(dc);
9982 		if (!dc_new_plane_state)
9983 			return -ENOMEM;
9984 
9985 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9986 				 plane->base.id, new_plane_crtc->base.id);
9987 
9988 		ret = fill_dc_plane_attributes(
9989 			drm_to_adev(new_plane_crtc->dev),
9990 			dc_new_plane_state,
9991 			new_plane_state,
9992 			new_crtc_state);
9993 		if (ret) {
9994 			dc_plane_state_release(dc_new_plane_state);
9995 			return ret;
9996 		}
9997 
9998 		ret = dm_atomic_get_state(state, &dm_state);
9999 		if (ret) {
10000 			dc_plane_state_release(dc_new_plane_state);
10001 			return ret;
10002 		}
10003 
10004 		/*
10005 		 * Any atomic check errors that occur after this will
10006 		 * not need a release. The plane state will be attached
10007 		 * to the stream, and therefore part of the atomic
10008 		 * state. It'll be released when the atomic state is
10009 		 * cleaned.
10010 		 */
10011 		if (!dc_add_plane_to_context(
10012 				dc,
10013 				dm_new_crtc_state->stream,
10014 				dc_new_plane_state,
10015 				dm_state->context)) {
10016 
10017 			dc_plane_state_release(dc_new_plane_state);
10018 			return -EINVAL;
10019 		}
10020 
10021 		dm_new_plane_state->dc_state = dc_new_plane_state;
10022 
10023 		/* Tell DC to do a full surface update every time there
10024 		 * is a plane change. Inefficient, but works for now.
10025 		 */
10026 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10027 
10028 		*lock_and_validation_needed = true;
10029 	}
10030 
10031 
10032 	return ret;
10033 }
10034 
10035 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10036 				struct drm_crtc *crtc,
10037 				struct drm_crtc_state *new_crtc_state)
10038 {
10039 	struct drm_plane_state *new_cursor_state, *new_primary_state;
10040 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10041 
10042 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10043 	 * cursor per pipe, but it inherits the scaling and positioning from
10044 	 * the underlying pipe. Check that the cursor plane's scaling matches
10045 	 * the primary plane's. */
10046 
10047 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10048 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10049 	if (!new_cursor_state || !new_primary_state ||
10050 	    !new_cursor_state->fb || !new_primary_state->fb) {
10051 		return 0;
10052 	}
10053 
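	/*
	 * crtc_w/h are in whole pixels while src_w/h are 16.16 fixed point,
	 * so shift the source size down before dividing. The factor of 1000
	 * keeps three decimal digits of precision for the comparison below;
	 * e.g. a 64x64 cursor framebuffer scanned out at 96x96 gives a scale
	 * of 1500 (1.5x), which must match the primary plane's scale exactly.
	 */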
10054 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10055 			 (new_cursor_state->src_w >> 16);
10056 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10057 			 (new_cursor_state->src_h >> 16);
10058 
10059 	primary_scale_w = new_primary_state->crtc_w * 1000 /
10060 			 (new_primary_state->src_w >> 16);
10061 	primary_scale_h = new_primary_state->crtc_h * 1000 /
10062 			 (new_primary_state->src_h >> 16);
10063 
10064 	if (cursor_scale_w != primary_scale_w ||
10065 	    cursor_scale_h != primary_scale_h) {
10066 		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10067 		return -EINVAL;
10068 	}
10069 
10070 	return 0;
10071 }
10072 
10073 #if defined(CONFIG_DRM_AMD_DC_DCN)
10074 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10075 {
10076 	struct drm_connector *connector;
10077 	struct drm_connector_state *conn_state;
10078 	struct amdgpu_dm_connector *aconnector = NULL;
10079 	int i;
10080 	for_each_new_connector_in_state(state, connector, conn_state, i) {
10081 		if (conn_state->crtc != crtc)
10082 			continue;
10083 
10084 		aconnector = to_amdgpu_dm_connector(connector);
10085 		if (!aconnector->port || !aconnector->mst_port)
10086 			aconnector = NULL;
10087 		else
10088 			break;
10089 	}
10090 
10091 	if (!aconnector)
10092 		return 0;
10093 
10094 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10095 }
10096 #endif
10097 
10098 static int validate_overlay(struct drm_atomic_state *state)
10099 {
10100 	int i;
10101 	struct drm_plane *plane;
10102 	struct drm_plane_state *new_plane_state;
10103 	struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
10104 
10105 	/* Find the overlay plane being updated; nothing to do if it is being disabled */
10106 	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10107 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10108 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10109 				return 0;
10110 
10111 			overlay_state = new_plane_state;
10112 			continue;
10113 		}
10114 	}
10115 
10116 	/* check if we're making changes to the overlay plane */
10117 	if (!overlay_state)
10118 		return 0;
10119 
10120 	/* check if overlay plane is enabled */
10121 	if (!overlay_state->crtc)
10122 		return 0;
10123 
10124 	/* find the primary plane for the CRTC that the overlay is enabled on */
10125 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10126 	if (IS_ERR(primary_state))
10127 		return PTR_ERR(primary_state);
10128 
10129 	/* check if primary plane is enabled */
10130 	if (!primary_state->crtc)
10131 		return 0;
10132 
10133 	/* check if cursor plane is enabled */
10134 	cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
10135 	if (IS_ERR(cursor_state))
10136 		return PTR_ERR(cursor_state);
10137 
10138 	if (drm_atomic_plane_disabling(overlay_state->crtc->cursor->state, cursor_state))
10139 		return 0;
10140 
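	/*
	 * DC programs the cursor per pipe, so an overlay that covers only part
	 * of the primary can leave the cursor duplicated or glitching as it
	 * crosses the overlay boundary; require full coverage instead.
	 */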
10141 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10142 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10143 	    primary_state->crtc_y < overlay_state->crtc_y ||
10144 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10145 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10146 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10147 		return -EINVAL;
10148 	}
10149 
10150 	return 0;
10151 }
10152 
10153 /**
10154  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10155  * @dev: The DRM device
10156  * @state: The atomic state to commit
10157  *
10158  * Validate that the given atomic state is programmable by DC into hardware.
10159  * This involves constructing a &struct dc_state reflecting the new hardware
10160  * state we wish to commit, then querying DC to see if it is programmable. It's
10161  * important not to modify the existing DC state. Otherwise, atomic_check
10162  * may unexpectedly commit hardware changes.
10163  *
10164  * When validating the DC state, it's important that the right locks are
10165  * acquired. For a full update, which removes/adds/updates streams on one
10166  * CRTC while flipping on another, acquiring the global lock guarantees
10167  * that the commit waits for completion of any outstanding flip using
10168  * DRM's synchronization events.
10169  *
10170  * Note that DM adds the affected connectors for all CRTCs in state, when that
10171  * might not seem necessary. This is because DC stream creation requires the
10172  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10173  * be possible but non-trivial - a possible TODO item.
10174  *
10175  * Return: 0 on success, or a negative error code if validation failed.
10176  */
10177 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10178 				  struct drm_atomic_state *state)
10179 {
10180 	struct amdgpu_device *adev = drm_to_adev(dev);
10181 	struct dm_atomic_state *dm_state = NULL;
10182 	struct dc *dc = adev->dm.dc;
10183 	struct drm_connector *connector;
10184 	struct drm_connector_state *old_con_state, *new_con_state;
10185 	struct drm_crtc *crtc;
10186 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10187 	struct drm_plane *plane;
10188 	struct drm_plane_state *old_plane_state, *new_plane_state;
10189 	enum dc_status status;
10190 	int ret, i;
10191 	bool lock_and_validation_needed = false;
10192 	struct dm_crtc_state *dm_old_crtc_state;
10193 
10194 	trace_amdgpu_dm_atomic_check_begin(state);
10195 
10196 	ret = drm_atomic_helper_check_modeset(dev, state);
10197 	if (ret)
10198 		goto fail;
10199 
10200 	/* Check connector changes */
10201 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10202 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10203 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10204 
10205 		/* Skip connectors that are disabled or part of modeset already. */
10206 		if (!old_con_state->crtc && !new_con_state->crtc)
10207 			continue;
10208 
10209 		if (!new_con_state->crtc)
10210 			continue;
10211 
10212 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10213 		if (IS_ERR(new_crtc_state)) {
10214 			ret = PTR_ERR(new_crtc_state);
10215 			goto fail;
10216 		}
10217 
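		/*
		 * The ABM (adaptive backlight) level is carried by the stream,
		 * so flag the connector change to force the CRTC through a
		 * modeset and rebuild the stream with the new level.
		 */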
10218 		if (dm_old_con_state->abm_level !=
10219 		    dm_new_con_state->abm_level)
10220 			new_crtc_state->connectors_changed = true;
10221 	}
10222 
10223 #if defined(CONFIG_DRM_AMD_DC_DCN)
10224 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10225 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10226 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10227 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10228 				if (ret)
10229 					goto fail;
10230 			}
10231 		}
10232 	}
10233 #endif
10234 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10235 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10236 
10237 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10238 		    !new_crtc_state->color_mgmt_changed &&
10239 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10240 		    !dm_old_crtc_state->dsc_force_changed)
10241 			continue;
10242 
10243 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10244 		if (ret)
10245 			goto fail;
10246 
10247 		if (!new_crtc_state->enable)
10248 			continue;
10249 
10250 		ret = drm_atomic_add_affected_connectors(state, crtc);
10251 		if (ret)
10252 			goto fail;
10253 
10254 		ret = drm_atomic_add_affected_planes(state, crtc);
10255 		if (ret)
10256 			goto fail;
10257 
10258 		if (dm_old_crtc_state->dsc_force_changed)
10259 			new_crtc_state->mode_changed = true;
10260 	}
10261 
10262 	/*
10263 	 * Add all primary and overlay planes on the CRTC to the state
10264 	 * whenever a plane is enabled to maintain correct z-ordering
10265 	 * and to enable fast surface updates.
10266 	 */
10267 	drm_for_each_crtc(crtc, dev) {
10268 		bool modified = false;
10269 
10270 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10271 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10272 				continue;
10273 
10274 			if (new_plane_state->crtc == crtc ||
10275 			    old_plane_state->crtc == crtc) {
10276 				modified = true;
10277 				break;
10278 			}
10279 		}
10280 
10281 		if (!modified)
10282 			continue;
10283 
10284 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10285 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10286 				continue;
10287 
10288 			new_plane_state =
10289 				drm_atomic_get_plane_state(state, plane);
10290 
10291 			if (IS_ERR(new_plane_state)) {
10292 				ret = PTR_ERR(new_plane_state);
10293 				goto fail;
10294 			}
10295 		}
10296 	}
10297 
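	/*
	 * Build the new dc_state in stages: removed planes and disabled CRTCs
	 * are dropped first so their resources are free before enabled CRTCs
	 * and added/modified planes are validated against the new config.
	 */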
10298 	/* Remove existing planes if they are modified */
10299 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10300 		ret = dm_update_plane_state(dc, state, plane,
10301 					    old_plane_state,
10302 					    new_plane_state,
10303 					    false,
10304 					    &lock_and_validation_needed);
10305 		if (ret)
10306 			goto fail;
10307 	}
10308 
10309 	/* Disable all crtcs which require disable */
10310 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10311 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10312 					   old_crtc_state,
10313 					   new_crtc_state,
10314 					   false,
10315 					   &lock_and_validation_needed);
10316 		if (ret)
10317 			goto fail;
10318 	}
10319 
10320 	/* Enable all crtcs which require enable */
10321 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10322 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10323 					   old_crtc_state,
10324 					   new_crtc_state,
10325 					   true,
10326 					   &lock_and_validation_needed);
10327 		if (ret)
10328 			goto fail;
10329 	}
10330 
10331 	ret = validate_overlay(state);
10332 	if (ret)
10333 		goto fail;
10334 
10335 	/* Add new/modified planes */
10336 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10337 		ret = dm_update_plane_state(dc, state, plane,
10338 					    old_plane_state,
10339 					    new_plane_state,
10340 					    true,
10341 					    &lock_and_validation_needed);
10342 		if (ret)
10343 			goto fail;
10344 	}
10345 
10346 	/* Run this here since we want to validate the streams we created */
10347 	ret = drm_atomic_helper_check_planes(dev, state);
10348 	if (ret)
10349 		goto fail;
10350 
10351 	/* Check cursor planes scaling */
10352 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10353 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10354 		if (ret)
10355 			goto fail;
10356 	}
10357 
10358 	if (state->legacy_cursor_update) {
10359 		/*
10360 		 * This is a fast cursor update coming from the plane update
10361 		 * helper, check if it can be done asynchronously for better
10362 		 * performance.
10363 		 */
10364 		state->async_update =
10365 			!drm_atomic_helper_async_check(dev, state);
10366 
10367 		/*
10368 		 * Skip the remaining global validation if this is an async
10369 		 * update. Cursor updates can be done without affecting
10370 		 * state or bandwidth calcs and this avoids the performance
10371 		 * penalty of locking the private state object and
10372 		 * allocating a new dc_state.
10373 		 */
10374 		if (state->async_update)
10375 			return 0;
10376 	}
10377 
10378 	/* Check scaling and underscan changes */
10379 	/* TODO Removed scaling changes validation due to inability to commit
10380 	 * new stream into context w\o causing full reset. Need to
10381 	 * decide how to handle.
10382 	 */
10383 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10384 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10385 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10386 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10387 
10388 		/* Skip any modesets/resets */
10389 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10390 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10391 			continue;
10392 
10393 		/* Skip anything that isn't a scaling or underscan change */
10394 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10395 			continue;
10396 
10397 		lock_and_validation_needed = true;
10398 	}
10399 
10400 	/*
10401 	 * Streams and planes are reset when there are changes that affect
10402 	 * bandwidth. Anything that affects bandwidth needs to go through
10403 	 * DC global validation to ensure that the configuration can be applied
10404 	 * to hardware.
10405 	 *
10406 	 * We have to currently stall out here in atomic_check for outstanding
10407 	 * commits to finish in this case because our IRQ handlers reference
10408 	 * DRM state directly - we can end up disabling interrupts too early
10409 	 * if we don't.
10410 	 *
10411 	 * TODO: Remove this stall and drop DM state private objects.
10412 	 */
10413 	if (lock_and_validation_needed) {
10414 		ret = dm_atomic_get_state(state, &dm_state);
10415 		if (ret)
10416 			goto fail;
10417 
10418 		ret = do_aquire_global_lock(dev, state);
10419 		if (ret)
10420 			goto fail;
10421 
10422 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
10425 
10426 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10427 		if (ret)
10428 			goto fail;
10429 #endif
10430 
10431 		/*
10432 		 * Perform validation of MST topology in the state:
10433 		 * We need to perform MST atomic check before calling
10434 		 * dc_validate_global_state(), or there is a chance
10435 		 * to get stuck in an infinite loop and hang eventually.
10436 		 */
10437 		ret = drm_dp_mst_atomic_check(state);
10438 		if (ret)
10439 			goto fail;
10440 		status = dc_validate_global_state(dc, dm_state->context, false);
10441 		if (status != DC_OK) {
10442 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10443 				       dc_status_to_str(status), status);
10444 			ret = -EINVAL;
10445 			goto fail;
10446 		}
10447 	} else {
10448 		/*
10449 		 * The commit is a fast update. Fast updates shouldn't change
10450 		 * the DC context, affect global validation, and can have their
10451 		 * commit work done in parallel with other commits not touching
10452 		 * the same resource. If we have a new DC context as part of
10453 		 * the DM atomic state from validation we need to free it and
10454 		 * retain the existing one instead.
10455 		 *
10456 		 * Furthermore, since the DM atomic state only contains the DC
10457 		 * context and can safely be annulled, we can free the state
10458 		 * and clear the associated private object now to free
10459 		 * some memory and avoid a possible use-after-free later.
10460 		 */
10461 
10462 		for (i = 0; i < state->num_private_objs; i++) {
10463 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10464 
10465 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
10466 				int j = state->num_private_objs-1;
10467 
10468 				dm_atomic_destroy_state(obj,
10469 						state->private_objs[i].state);
10470 
10471 				/* If i is not at the end of the array then the
10472 				 * last element needs to be moved to where i was
10473 				 * before the array can safely be truncated.
10474 				 */
10475 				if (i != j)
10476 					state->private_objs[i] =
10477 						state->private_objs[j];
10478 
10479 				state->private_objs[j].ptr = NULL;
10480 				state->private_objs[j].state = NULL;
10481 				state->private_objs[j].old_state = NULL;
10482 				state->private_objs[j].new_state = NULL;
10483 
10484 				state->num_private_objs = j;
10485 				break;
10486 			}
10487 		}
10488 	}
10489 
10490 	/* Store the overall update type for use later in atomic check. */
10491 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10492 		struct dm_crtc_state *dm_new_crtc_state =
10493 			to_dm_crtc_state(new_crtc_state);
10494 
10495 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10496 							 UPDATE_TYPE_FULL :
10497 							 UPDATE_TYPE_FAST;
10498 	}
10499 
10500 	/* Must be success */
10501 	WARN_ON(ret);
10502 
10503 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10504 
10505 	return ret;
10506 
10507 fail:
10508 	if (ret == -EDEADLK)
10509 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10510 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10511 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10512 	else
10513 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10514 
10515 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10516 
10517 	return ret;
10518 }
10519 
10520 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10521 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10522 {
10523 	uint8_t dpcd_data;
10524 	bool capable = false;
10525 
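	/*
	 * DPCD register DP_DOWN_STREAM_PORT_COUNT (0x0007) carries the
	 * MSA_TIMING_PAR_IGNORED bit: a sink that sets it can regenerate its
	 * timing without the MSA parameters, which is what the FreeSync check
	 * in amdgpu_dm_update_freesync_caps() relies on for DP/eDP sinks.
	 */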
10526 	if (amdgpu_dm_connector->dc_link &&
10527 		dm_helpers_dp_read_dpcd(
10528 				NULL,
10529 				amdgpu_dm_connector->dc_link,
10530 				DP_DOWN_STREAM_PORT_COUNT,
10531 				&dpcd_data,
10532 				sizeof(dpcd_data))) {
10533 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10534 	}
10535 
10536 	return capable;
10537 }
10538 
10539 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10540 		uint8_t *edid_ext, int len,
10541 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10542 {
10543 	int i;
10544 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10545 	struct dc *dc = adev->dm.dc;
10546 
10547 	/* send extension block to DMCU for parsing */
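	/*
	 * The firmware parser consumes the CEA extension block in 8-byte
	 * chunks. Every chunk except the last is answered with an ack; the
	 * final chunk triggers a reply that reports whether an AMD VSDB was
	 * found and, if so, the FreeSync range it advertises.
	 */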
10548 	for (i = 0; i < len; i += 8) {
10549 		bool res;
10550 		int offset;
10551 
10552 		/* send 8 bytes at a time */
10553 		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10554 			return false;
10555 
10556 		if (i + 8 == len) {
10557 			/* EDID block sent completed, expect result */
10558 			int version, min_rate, max_rate;
10559 
10560 			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10561 			if (res) {
10562 				/* amd vsdb found */
10563 				vsdb_info->freesync_supported = 1;
10564 				vsdb_info->amd_vsdb_version = version;
10565 				vsdb_info->min_refresh_rate_hz = min_rate;
10566 				vsdb_info->max_refresh_rate_hz = max_rate;
10567 				return true;
10568 			}
10569 			/* not amd vsdb */
10570 			return false;
10571 		}
10572 
10573 		/* check for ack */
10574 		res = dc_edid_parser_recv_cea_ack(dc, &offset);
10575 		if (!res)
10576 			return false;
10577 	}
10578 
10579 	return false;
10580 }
10581 
10582 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10583 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10584 {
10585 	uint8_t *edid_ext = NULL;
10586 	int i;
10587 	bool valid_vsdb_found = false;
10588 
10589 	/*----- drm_find_cea_extension() -----*/
10590 	/* No EDID or EDID extensions */
10591 	if (edid == NULL || edid->extensions == 0)
10592 		return -ENODEV;
10593 
10594 	/* Find CEA extension */
10595 	for (i = 0; i < edid->extensions; i++) {
10596 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10597 		if (edid_ext[0] == CEA_EXT)
10598 			break;
10599 	}
10600 
10601 	if (i == edid->extensions)
10602 		return -ENODEV;
10603 
10604 	/*----- cea_db_offsets() -----*/
10605 	if (edid_ext[0] != CEA_EXT)
10606 		return -ENODEV;
10607 
10608 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10609 
10610 	return valid_vsdb_found ? i : -ENODEV;
10611 }
10612 
10613 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10614 					struct edid *edid)
10615 {
10616 	int i = 0;
10617 	struct detailed_timing *timing;
10618 	struct detailed_non_pixel *data;
10619 	struct detailed_data_monitor_range *range;
10620 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10621 			to_amdgpu_dm_connector(connector);
10622 	struct dm_connector_state *dm_con_state = NULL;
10623 
10624 	struct drm_device *dev = connector->dev;
10625 	struct amdgpu_device *adev = drm_to_adev(dev);
10626 	bool freesync_capable = false;
10627 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10628 
10629 	if (!connector->state) {
10630 		DRM_ERROR("%s - Connector has no state\n", __func__);
10631 		goto update;
10632 	}
10633 
10634 	if (!edid) {
10635 		dm_con_state = to_dm_connector_state(connector->state);
10636 
10637 		amdgpu_dm_connector->min_vfreq = 0;
10638 		amdgpu_dm_connector->max_vfreq = 0;
10639 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10640 
10641 		goto update;
10642 	}
10643 
10644 	dm_con_state = to_dm_connector_state(connector->state);
10645 
10646 	if (!amdgpu_dm_connector->dc_sink) {
10647 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10648 		goto update;
10649 	}
10650 	if (!adev->dm.freesync_module)
10651 		goto update;
10652 
10653 
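	/*
	 * Two sources of FreeSync capability are handled below: DP/eDP sinks
	 * advertise a refresh range through the EDID monitor range descriptor
	 * (trusted only when the sink can ignore MSA timing), while HDMI
	 * sinks advertise it through the AMD vendor-specific data block.
	 */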
10654 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10655 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10656 		bool edid_check_required = false;
10657 
10658 		if (edid) {
10659 			edid_check_required = is_dp_capable_without_timing_msa(
10660 						adev->dm.dc,
10661 						amdgpu_dm_connector);
10662 		}
10663 
10664 		if (edid_check_required && (edid->version > 1 ||
10665 		    (edid->version == 1 && edid->revision > 1))) {
10666 			for (i = 0; i < 4; i++) {
10667 
10668 				timing	= &edid->detailed_timings[i];
10669 				data	= &timing->data.other_data;
10670 				range	= &data->data.range;
10671 				/*
10672 				 * Check if monitor has continuous frequency mode
10673 				 */
10674 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10675 					continue;
10676 				/*
10677 				 * Check for flag range limits only. If flag == 1 then
10678 				 * no additional timing information provided.
10679 				 * Default GTF, GTF Secondary curve and CVT are not
10680 				 * supported
10681 				 */
10682 				if (range->flags != 1)
10683 					continue;
10684 
10685 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10686 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10687 				amdgpu_dm_connector->pixel_clock_mhz =
10688 					range->pixel_clock_mhz * 10;
10689 
10690 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10691 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10692 
10693 				break;
10694 			}
10695 
10696 			if (amdgpu_dm_connector->max_vfreq -
10697 			    amdgpu_dm_connector->min_vfreq > 10) {
10698 
10699 				freesync_capable = true;
10700 			}
10701 		}
10702 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10703 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10704 		if (i >= 0 && vsdb_info.freesync_supported) {
10705 			timing  = &edid->detailed_timings[i];
10706 			data    = &timing->data.other_data;
10707 
10708 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10709 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10710 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10711 				freesync_capable = true;
10712 
10713 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10714 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10715 		}
10716 	}
10717 
10718 update:
10719 	if (dm_con_state)
10720 		dm_con_state->freesync_capable = freesync_capable;
10721 
10722 	if (connector->vrr_capable_property)
10723 		drm_connector_set_vrr_capable_property(connector,
10724 						       freesync_capable);
10725 }
10726 
10727 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10728 {
10729 	struct amdgpu_device *adev = drm_to_adev(dev);
10730 	struct dc *dc = adev->dm.dc;
10731 	int i;
10732 
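	/*
	 * Propagate the current force_timing_sync setting into every active
	 * stream and ask DC to re-run CRTC synchronization with it applied.
	 */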
10733 	mutex_lock(&adev->dm.dc_lock);
10734 	if (dc->current_state) {
10735 		for (i = 0; i < dc->current_state->stream_count; ++i)
10736 			dc->current_state->streams[i]
10737 				->triggered_crtc_reset.enabled =
10738 				adev->dm.force_timing_sync;
10739 
10740 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10741 		dc_trigger_sync(dc, dc->current_state);
10742 	}
10743 	mutex_unlock(&adev->dm.dc_lock);
10744 }
10745 
10746 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10747 		       uint32_t value, const char *func_name)
10748 {
10749 #ifdef DM_CHECK_ADDR_0
10750 	if (address == 0) {
10751 		DC_ERR("invalid register write. address = 0");
10752 		return;
10753 	}
10754 #endif
10755 	cgs_write_register(ctx->cgs_device, address, value);
10756 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10757 }
10758 
10759 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10760 			  const char *func_name)
10761 {
10762 	uint32_t value;
10763 #ifdef DM_CHECK_ADDR_0
10764 	if (address == 0) {
10765 		DC_ERR("invalid register read; address = 0\n");
10766 		return 0;
10767 	}
10768 #endif
10769 
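	/*
	 * Register writes can be gathered into a DMUB offload sequence, but
	 * reads cannot: while a gather is in progress (and not bursting
	 * writes) the read value would not be meaningful, so flag it as a
	 * driver bug and return 0.
	 */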
10770 	if (ctx->dmub_srv &&
10771 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10772 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10773 		ASSERT(false);
10774 		return 0;
10775 	}
10776 
10777 	value = cgs_read_register(ctx->cgs_device, address);
10778 
10779 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10780 
10781 	return value;
10782 }
10783 
10784 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10785 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
10786 {
10787 	struct amdgpu_device *adev = ctx->driver_context;
10788 	int ret = 0;
10789 
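	/*
	 * Kick off the AUX transaction on the DMUB and wait (up to 10s) for
	 * the notification path to signal dmub_aux_transfer_done; the reply
	 * status and any read data are then copied out of dmub_notify.
	 */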
10790 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10791 	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10792 	if (ret == 0) {
10793 		*operation_result = AUX_RET_ERROR_TIMEOUT;
10794 		return -1;
10795 	}
10796 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10797 
10798 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10799 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10800 
10801 		// For read case, Copy data to payload
10802 		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10803 		(*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10804 			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10805 			adev->dm.dmub_notify->aux_reply.length);
10806 	}
10807 
10808 	return adev->dm.dmub_notify->aux_reply.length;
10809 }
10810