xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 6bcfe8eaeef01fb389e951e7c648b934dfd62f15)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #include "amdgpu_dm_plane.h"
50 #include "amdgpu_dm_crtc.h"
51 #ifdef CONFIG_DRM_AMD_DC_HDCP
52 #include "amdgpu_dm_hdcp.h"
53 #include <drm/display/drm_hdcp_helper.h>
54 #endif
55 #include "amdgpu_pm.h"
56 #include "amdgpu_atombios.h"
57 
58 #include "amd_shared.h"
59 #include "amdgpu_dm_irq.h"
60 #include "dm_helpers.h"
61 #include "amdgpu_dm_mst_types.h"
62 #if defined(CONFIG_DEBUG_FS)
63 #include "amdgpu_dm_debugfs.h"
64 #endif
65 #include "amdgpu_dm_psr.h"
66 
67 #include "ivsrcid/ivsrcid_vislands30.h"
68 
69 #include "i2caux_interface.h"
70 #include <linux/module.h>
71 #include <linux/moduleparam.h>
72 #include <linux/types.h>
73 #include <linux/pm_runtime.h>
74 #include <linux/pci.h>
75 #include <linux/firmware.h>
76 #include <linux/component.h>
77 #include <linux/dmi.h>
78 
79 #include <drm/display/drm_dp_mst_helper.h>
80 #include <drm/display/drm_hdmi_helper.h>
81 #include <drm/drm_atomic.h>
82 #include <drm/drm_atomic_uapi.h>
83 #include <drm/drm_atomic_helper.h>
84 #include <drm/drm_blend.h>
85 #include <drm/drm_fb_helper.h>
86 #include <drm/drm_fourcc.h>
87 #include <drm/drm_edid.h>
88 #include <drm/drm_vblank.h>
89 #include <drm/drm_audio_component.h>
90 #include <drm/drm_gem_atomic_helper.h>
91 #include <drm/drm_plane_helper.h>
92 
93 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
94 
95 #include "dcn/dcn_1_0_offset.h"
96 #include "dcn/dcn_1_0_sh_mask.h"
97 #include "soc15_hw_ip.h"
98 #include "soc15_common.h"
99 #include "vega10_ip_offset.h"
100 
101 #include "soc15_common.h"
102 
103 #include "gc/gc_11_0_0_offset.h"
104 #include "gc/gc_11_0_0_sh_mask.h"
105 
106 #include "modules/inc/mod_freesync.h"
107 #include "modules/power/power_helpers.h"
108 #include "modules/inc/mod_info_packet.h"
109 
110 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
112 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
114 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
116 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
117 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
118 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
119 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
120 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
121 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
122 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
123 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
124 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
125 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
126 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
127 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
128 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
129 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
130 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
131 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
132 
133 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
134 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
135 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
136 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
137 
138 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
139 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
140 
141 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
142 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
143 
144 /* Number of bytes in PSP header for firmware. */
145 #define PSP_HEADER_BYTES 0x100
146 
147 /* Number of bytes in PSP footer for firmware. */
148 #define PSP_FOOTER_BYTES 0x100
149 
150 /**
151  * DOC: overview
152  *
153  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
154  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
155  * requests into DC requests, and DC responses into DRM responses.
156  *
157  * The root control structure is &struct amdgpu_display_manager.
158  */
159 
160 /* basic init/fini API */
161 static int amdgpu_dm_init(struct amdgpu_device *adev);
162 static void amdgpu_dm_fini(struct amdgpu_device *adev);
163 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
164 
165 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
166 {
167 	switch (link->dpcd_caps.dongle_type) {
168 	case DISPLAY_DONGLE_NONE:
169 		return DRM_MODE_SUBCONNECTOR_Native;
170 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
171 		return DRM_MODE_SUBCONNECTOR_VGA;
172 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
173 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
174 		return DRM_MODE_SUBCONNECTOR_DVID;
175 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
176 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
177 		return DRM_MODE_SUBCONNECTOR_HDMIA;
178 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
179 	default:
180 		return DRM_MODE_SUBCONNECTOR_Unknown;
181 	}
182 }
183 
184 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
185 {
186 	struct dc_link *link = aconnector->dc_link;
187 	struct drm_connector *connector = &aconnector->base;
188 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
189 
190 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
191 		return;
192 
193 	if (aconnector->dc_sink)
194 		subconnector = get_subconnector_type(link);
195 
196 	drm_object_property_set_value(&connector->base,
197 			connector->dev->mode_config.dp_subconnector_property,
198 			subconnector);
199 }
200 
201 /*
202  * initializes drm_device display related structures, based on the information
203  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
204  * drm_encoder, drm_mode_config
205  *
206  * Returns 0 on success
207  */
208 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
209 /* removes and deallocates the drm structures, created by the above function */
210 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
211 
212 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
213 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
214 				    uint32_t link_index,
215 				    struct amdgpu_encoder *amdgpu_encoder);
216 static int amdgpu_dm_encoder_init(struct drm_device *dev,
217 				  struct amdgpu_encoder *aencoder,
218 				  uint32_t link_index);
219 
220 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
221 
222 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
223 
224 static int amdgpu_dm_atomic_check(struct drm_device *dev,
225 				  struct drm_atomic_state *state);
226 
227 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
228 static void handle_hpd_rx_irq(void *param);
229 
230 static bool
231 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
232 				 struct drm_crtc_state *new_crtc_state);
233 /*
234  * dm_vblank_get_counter
235  *
236  * @brief
237  * Get counter for number of vertical blanks
238  *
239  * @param
240  * struct amdgpu_device *adev - [in] desired amdgpu device
241  * int disp_idx - [in] which CRTC to get the counter from
242  *
243  * @return
244  * Counter for vertical blanks
245  */
246 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
247 {
248 	if (crtc >= adev->mode_info.num_crtc)
249 		return 0;
250 	else {
251 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
252 
253 		if (acrtc->dm_irq_params.stream == NULL) {
254 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
255 				  crtc);
256 			return 0;
257 		}
258 
259 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
260 	}
261 }
262 
263 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
264 				  u32 *vbl, u32 *position)
265 {
266 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
267 
268 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
269 		return -EINVAL;
270 	else {
271 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
272 
273 		if (acrtc->dm_irq_params.stream ==  NULL) {
274 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
275 				  crtc);
276 			return 0;
277 		}
278 
279 		/*
280 		 * TODO rework base driver to use values directly.
281 		 * for now parse it back into reg-format
282 		 */
283 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
284 					 &v_blank_start,
285 					 &v_blank_end,
286 					 &h_position,
287 					 &v_position);
288 
289 		*position = v_position | (h_position << 16);
290 		*vbl = v_blank_start | (v_blank_end << 16);
291 	}
292 
293 	return 0;
294 }
295 
296 static bool dm_is_idle(void *handle)
297 {
298 	/* XXX todo */
299 	return true;
300 }
301 
302 static int dm_wait_for_idle(void *handle)
303 {
304 	/* XXX todo */
305 	return 0;
306 }
307 
308 static bool dm_check_soft_reset(void *handle)
309 {
310 	return false;
311 }
312 
313 static int dm_soft_reset(void *handle)
314 {
315 	/* XXX todo */
316 	return 0;
317 }
318 
319 static struct amdgpu_crtc *
320 get_crtc_by_otg_inst(struct amdgpu_device *adev,
321 		     int otg_inst)
322 {
323 	struct drm_device *dev = adev_to_drm(adev);
324 	struct drm_crtc *crtc;
325 	struct amdgpu_crtc *amdgpu_crtc;
326 
327 	if (WARN_ON(otg_inst == -1))
328 		return adev->mode_info.crtcs[0];
329 
330 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
331 		amdgpu_crtc = to_amdgpu_crtc(crtc);
332 
333 		if (amdgpu_crtc->otg_inst == otg_inst)
334 			return amdgpu_crtc;
335 	}
336 
337 	return NULL;
338 }
339 
340 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
341 					      struct dm_crtc_state *new_state)
342 {
343 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
344 		return true;
345 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
346 		return true;
347 	else
348 		return false;
349 }
350 
351 /**
352  * dm_pflip_high_irq() - Handle pageflip interrupt
353  * @interrupt_params: ignored
354  *
355  * Handles the pageflip interrupt by notifying all interested parties
356  * that the pageflip has been completed.
357  */
358 static void dm_pflip_high_irq(void *interrupt_params)
359 {
360 	struct amdgpu_crtc *amdgpu_crtc;
361 	struct common_irq_params *irq_params = interrupt_params;
362 	struct amdgpu_device *adev = irq_params->adev;
363 	unsigned long flags;
364 	struct drm_pending_vblank_event *e;
365 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
366 	bool vrr_active;
367 
368 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
369 
370 	/* IRQ could occur when in initial stage */
371 	/* TODO work and BO cleanup */
372 	if (amdgpu_crtc == NULL) {
373 		DC_LOG_PFLIP("CRTC is null, returning.\n");
374 		return;
375 	}
376 
377 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
378 
379 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
380 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
381 						 amdgpu_crtc->pflip_status,
382 						 AMDGPU_FLIP_SUBMITTED,
383 						 amdgpu_crtc->crtc_id,
384 						 amdgpu_crtc);
385 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
386 		return;
387 	}
388 
389 	/* page flip completed. */
390 	e = amdgpu_crtc->event;
391 	amdgpu_crtc->event = NULL;
392 
393 	WARN_ON(!e);
394 
395 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
396 
397 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
398 	if (!vrr_active ||
399 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
400 				      &v_blank_end, &hpos, &vpos) ||
401 	    (vpos < v_blank_start)) {
402 		/* Update to correct count and vblank timestamp if racing with
403 		 * vblank irq. This also updates to the correct vblank timestamp
404 		 * even in VRR mode, as scanout is past the front-porch atm.
405 		 */
406 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
407 
408 		/* Wake up userspace by sending the pageflip event with proper
409 		 * count and timestamp of vblank of flip completion.
410 		 */
411 		if (e) {
412 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
413 
414 			/* Event sent, so done with vblank for this flip */
415 			drm_crtc_vblank_put(&amdgpu_crtc->base);
416 		}
417 	} else if (e) {
418 		/* VRR active and inside front-porch: vblank count and
419 		 * timestamp for pageflip event will only be up to date after
420 		 * drm_crtc_handle_vblank() has been executed from late vblank
421 		 * irq handler after start of back-porch (vline 0). We queue the
422 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
423 		 * updated timestamp and count, once it runs after us.
424 		 *
425 		 * We need to open-code this instead of using the helper
426 		 * drm_crtc_arm_vblank_event(), as that helper would
427 		 * call drm_crtc_accurate_vblank_count(), which we must
428 		 * not call in VRR mode while we are in front-porch!
429 		 */
430 
431 		/* sequence will be replaced by real count during send-out. */
432 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
433 		e->pipe = amdgpu_crtc->crtc_id;
434 
435 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
436 		e = NULL;
437 	}
438 
439 	/* Keep track of vblank of this flip for flip throttling. We use the
440 	 * cooked hw counter, as that one incremented at start of this vblank
441 	 * of pageflip completion, so last_flip_vblank is the forbidden count
442 	 * for queueing new pageflips if vsync + VRR is enabled.
443 	 */
444 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
445 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
446 
447 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
448 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
449 
450 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
451 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
452 		     vrr_active, (int) !e);
453 }
454 
455 static void dm_vupdate_high_irq(void *interrupt_params)
456 {
457 	struct common_irq_params *irq_params = interrupt_params;
458 	struct amdgpu_device *adev = irq_params->adev;
459 	struct amdgpu_crtc *acrtc;
460 	struct drm_device *drm_dev;
461 	struct drm_vblank_crtc *vblank;
462 	ktime_t frame_duration_ns, previous_timestamp;
463 	unsigned long flags;
464 	int vrr_active;
465 
466 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
467 
468 	if (acrtc) {
469 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
470 		drm_dev = acrtc->base.dev;
471 		vblank = &drm_dev->vblank[acrtc->base.index];
472 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
473 		frame_duration_ns = vblank->time - previous_timestamp;
474 
475 		if (frame_duration_ns > 0) {
476 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
477 						frame_duration_ns,
478 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
479 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
480 		}
481 
482 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
483 			      acrtc->crtc_id,
484 			      vrr_active);
485 
486 		/* Core vblank handling is done here after end of front-porch in
487 		 * vrr mode, as vblank timestamping will give valid results
488 		 * while now done after front-porch. This will also deliver
489 		 * page-flip completion events that have been queued to us
490 		 * if a pageflip happened inside front-porch.
491 		 */
492 		if (vrr_active) {
493 			dm_crtc_handle_vblank(acrtc);
494 
495 			/* BTR processing for pre-DCE12 ASICs */
496 			if (acrtc->dm_irq_params.stream &&
497 			    adev->family < AMDGPU_FAMILY_AI) {
498 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
499 				mod_freesync_handle_v_update(
500 				    adev->dm.freesync_module,
501 				    acrtc->dm_irq_params.stream,
502 				    &acrtc->dm_irq_params.vrr_params);
503 
504 				dc_stream_adjust_vmin_vmax(
505 				    adev->dm.dc,
506 				    acrtc->dm_irq_params.stream,
507 				    &acrtc->dm_irq_params.vrr_params.adjust);
508 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
509 			}
510 		}
511 	}
512 }
513 
514 /**
515  * dm_crtc_high_irq() - Handles CRTC interrupt
516  * @interrupt_params: used for determining the CRTC instance
517  *
518  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
519  * event handler.
520  */
521 static void dm_crtc_high_irq(void *interrupt_params)
522 {
523 	struct common_irq_params *irq_params = interrupt_params;
524 	struct amdgpu_device *adev = irq_params->adev;
525 	struct amdgpu_crtc *acrtc;
526 	unsigned long flags;
527 	int vrr_active;
528 
529 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
530 	if (!acrtc)
531 		return;
532 
533 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
534 
535 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
536 		      vrr_active, acrtc->dm_irq_params.active_planes);
537 
538 	/**
539 	 * Core vblank handling at start of front-porch is only possible
540 	 * in non-vrr mode, as only there vblank timestamping will give
541 	 * valid results while done in front-porch. Otherwise defer it
542 	 * to dm_vupdate_high_irq after end of front-porch.
543 	 */
544 	if (!vrr_active)
545 		dm_crtc_handle_vblank(acrtc);
546 
547 	/**
548 	 * Following stuff must happen at start of vblank, for crc
549 	 * computation and below-the-range btr support in vrr mode.
550 	 */
551 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
552 
553 	/* BTR updates need to happen before VUPDATE on Vega and above. */
554 	if (adev->family < AMDGPU_FAMILY_AI)
555 		return;
556 
557 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
558 
559 	if (acrtc->dm_irq_params.stream &&
560 	    acrtc->dm_irq_params.vrr_params.supported &&
561 	    acrtc->dm_irq_params.freesync_config.state ==
562 		    VRR_STATE_ACTIVE_VARIABLE) {
563 		mod_freesync_handle_v_update(adev->dm.freesync_module,
564 					     acrtc->dm_irq_params.stream,
565 					     &acrtc->dm_irq_params.vrr_params);
566 
567 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
568 					   &acrtc->dm_irq_params.vrr_params.adjust);
569 	}
570 
571 	/*
572 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
573 	 * In that case, pageflip completion interrupts won't fire and pageflip
574 	 * completion events won't get delivered. Prevent this by sending
575 	 * pending pageflip events from here if a flip is still pending.
576 	 *
577 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
578 	 * avoid race conditions between flip programming and completion,
579 	 * which could cause too early flip completion events.
580 	 */
581 	if (adev->family >= AMDGPU_FAMILY_RV &&
582 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
583 	    acrtc->dm_irq_params.active_planes == 0) {
584 		if (acrtc->event) {
585 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
586 			acrtc->event = NULL;
587 			drm_crtc_vblank_put(&acrtc->base);
588 		}
589 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
590 	}
591 
592 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
593 }
594 
595 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
596 /**
597  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
598  * DCN generation ASICs
599  * @interrupt_params: interrupt parameters
600  *
601  * Used to set crc window/read out crc value at vertical line 0 position
602  */
603 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
604 {
605 	struct common_irq_params *irq_params = interrupt_params;
606 	struct amdgpu_device *adev = irq_params->adev;
607 	struct amdgpu_crtc *acrtc;
608 
609 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
610 
611 	if (!acrtc)
612 		return;
613 
614 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
615 }
616 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
617 
618 /**
619  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
620  * @adev: amdgpu_device pointer
621  * @notify: dmub notification structure
622  *
623  * Dmub AUX or SET_CONFIG command completion processing callback
624  * Copies dmub notification to DM which is to be read by AUX command.
625  * issuing thread and also signals the event to wake up the thread.
626  */
627 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
628 					struct dmub_notification *notify)
629 {
630 	if (adev->dm.dmub_notify)
631 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
632 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
633 		complete(&adev->dm.dmub_aux_transfer_done);
634 }
635 
636 /**
637  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
638  * @adev: amdgpu_device pointer
639  * @notify: dmub notification structure
640  *
641  * Dmub Hpd interrupt processing callback. Gets displayindex through the
642  * ink index and calls helper to do the processing.
643  */
644 static void dmub_hpd_callback(struct amdgpu_device *adev,
645 			      struct dmub_notification *notify)
646 {
647 	struct amdgpu_dm_connector *aconnector;
648 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
649 	struct drm_connector *connector;
650 	struct drm_connector_list_iter iter;
651 	struct dc_link *link;
652 	uint8_t link_index = 0;
653 	struct drm_device *dev;
654 
655 	if (adev == NULL)
656 		return;
657 
658 	if (notify == NULL) {
659 		DRM_ERROR("DMUB HPD callback notification was NULL");
660 		return;
661 	}
662 
663 	if (notify->link_index > adev->dm.dc->link_count) {
664 		DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
665 		return;
666 	}
667 
668 	link_index = notify->link_index;
669 	link = adev->dm.dc->links[link_index];
670 	dev = adev->dm.ddev;
671 
672 	drm_connector_list_iter_begin(dev, &iter);
673 	drm_for_each_connector_iter(connector, &iter) {
674 		aconnector = to_amdgpu_dm_connector(connector);
675 		if (link && aconnector->dc_link == link) {
676 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
677 			hpd_aconnector = aconnector;
678 			break;
679 		}
680 	}
681 	drm_connector_list_iter_end(&iter);
682 
683 	if (hpd_aconnector) {
684 		if (notify->type == DMUB_NOTIFICATION_HPD)
685 			handle_hpd_irq_helper(hpd_aconnector);
686 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
687 			handle_hpd_rx_irq(hpd_aconnector);
688 	}
689 }
690 
691 /**
692  * register_dmub_notify_callback - Sets callback for DMUB notify
693  * @adev: amdgpu_device pointer
694  * @type: Type of dmub notification
695  * @callback: Dmub interrupt callback function
696  * @dmub_int_thread_offload: offload indicator
697  *
698  * API to register a dmub callback handler for a dmub notification
699  * Also sets indicator whether callback processing to be offloaded.
700  * to dmub interrupt handling thread
701  * Return: true if successfully registered, false if there is existing registration
702  */
703 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
704 					  enum dmub_notification_type type,
705 					  dmub_notify_interrupt_callback_t callback,
706 					  bool dmub_int_thread_offload)
707 {
708 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
709 		adev->dm.dmub_callback[type] = callback;
710 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
711 	} else
712 		return false;
713 
714 	return true;
715 }
716 
717 static void dm_handle_hpd_work(struct work_struct *work)
718 {
719 	struct dmub_hpd_work *dmub_hpd_wrk;
720 
721 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
722 
723 	if (!dmub_hpd_wrk->dmub_notify) {
724 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
725 		return;
726 	}
727 
728 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
729 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
730 		dmub_hpd_wrk->dmub_notify);
731 	}
732 
733 	kfree(dmub_hpd_wrk->dmub_notify);
734 	kfree(dmub_hpd_wrk);
735 
736 }
737 
738 #define DMUB_TRACE_MAX_READ 64
739 /**
740  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
741  * @interrupt_params: used for determining the Outbox instance
742  *
743  * Handles the Outbox Interrupt
744  * event handler.
745  */
746 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
747 {
748 	struct dmub_notification notify;
749 	struct common_irq_params *irq_params = interrupt_params;
750 	struct amdgpu_device *adev = irq_params->adev;
751 	struct amdgpu_display_manager *dm = &adev->dm;
752 	struct dmcub_trace_buf_entry entry = { 0 };
753 	uint32_t count = 0;
754 	struct dmub_hpd_work *dmub_hpd_wrk;
755 	struct dc_link *plink = NULL;
756 
757 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
758 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
759 
760 		do {
761 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
762 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
763 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
764 				continue;
765 			}
766 			if (!dm->dmub_callback[notify.type]) {
767 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
768 				continue;
769 			}
770 			if (dm->dmub_thread_offload[notify.type] == true) {
771 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
772 				if (!dmub_hpd_wrk) {
773 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
774 					return;
775 				}
776 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
777 				if (!dmub_hpd_wrk->dmub_notify) {
778 					kfree(dmub_hpd_wrk);
779 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
780 					return;
781 				}
782 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
783 				if (dmub_hpd_wrk->dmub_notify)
784 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
785 				dmub_hpd_wrk->adev = adev;
786 				if (notify.type == DMUB_NOTIFICATION_HPD) {
787 					plink = adev->dm.dc->links[notify.link_index];
788 					if (plink) {
789 						plink->hpd_status =
790 							notify.hpd_status == DP_HPD_PLUG;
791 					}
792 				}
793 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
794 			} else {
795 				dm->dmub_callback[notify.type](adev, &notify);
796 			}
797 		} while (notify.pending_notification);
798 	}
799 
800 
801 	do {
802 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
803 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
804 							entry.param0, entry.param1);
805 
806 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
807 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
808 		} else
809 			break;
810 
811 		count++;
812 
813 	} while (count <= DMUB_TRACE_MAX_READ);
814 
815 	if (count > DMUB_TRACE_MAX_READ)
816 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
817 }
818 
819 static int dm_set_clockgating_state(void *handle,
820 		  enum amd_clockgating_state state)
821 {
822 	return 0;
823 }
824 
825 static int dm_set_powergating_state(void *handle,
826 		  enum amd_powergating_state state)
827 {
828 	return 0;
829 }
830 
831 /* Prototypes of private functions */
832 static int dm_early_init(void* handle);
833 
834 /* Allocate memory for FBC compressed data  */
835 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
836 {
837 	struct drm_device *dev = connector->dev;
838 	struct amdgpu_device *adev = drm_to_adev(dev);
839 	struct dm_compressor_info *compressor = &adev->dm.compressor;
840 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
841 	struct drm_display_mode *mode;
842 	unsigned long max_size = 0;
843 
844 	if (adev->dm.dc->fbc_compressor == NULL)
845 		return;
846 
847 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
848 		return;
849 
850 	if (compressor->bo_ptr)
851 		return;
852 
853 
854 	list_for_each_entry(mode, &connector->modes, head) {
855 		if (max_size < mode->htotal * mode->vtotal)
856 			max_size = mode->htotal * mode->vtotal;
857 	}
858 
859 	if (max_size) {
860 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
861 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
862 			    &compressor->gpu_addr, &compressor->cpu_addr);
863 
864 		if (r)
865 			DRM_ERROR("DM: Failed to initialize FBC\n");
866 		else {
867 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
868 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
869 		}
870 
871 	}
872 
873 }
874 
875 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
876 					  int pipe, bool *enabled,
877 					  unsigned char *buf, int max_bytes)
878 {
879 	struct drm_device *dev = dev_get_drvdata(kdev);
880 	struct amdgpu_device *adev = drm_to_adev(dev);
881 	struct drm_connector *connector;
882 	struct drm_connector_list_iter conn_iter;
883 	struct amdgpu_dm_connector *aconnector;
884 	int ret = 0;
885 
886 	*enabled = false;
887 
888 	mutex_lock(&adev->dm.audio_lock);
889 
890 	drm_connector_list_iter_begin(dev, &conn_iter);
891 	drm_for_each_connector_iter(connector, &conn_iter) {
892 		aconnector = to_amdgpu_dm_connector(connector);
893 		if (aconnector->audio_inst != port)
894 			continue;
895 
896 		*enabled = true;
897 		ret = drm_eld_size(connector->eld);
898 		memcpy(buf, connector->eld, min(max_bytes, ret));
899 
900 		break;
901 	}
902 	drm_connector_list_iter_end(&conn_iter);
903 
904 	mutex_unlock(&adev->dm.audio_lock);
905 
906 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
907 
908 	return ret;
909 }
910 
911 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
912 	.get_eld = amdgpu_dm_audio_component_get_eld,
913 };
914 
915 static int amdgpu_dm_audio_component_bind(struct device *kdev,
916 				       struct device *hda_kdev, void *data)
917 {
918 	struct drm_device *dev = dev_get_drvdata(kdev);
919 	struct amdgpu_device *adev = drm_to_adev(dev);
920 	struct drm_audio_component *acomp = data;
921 
922 	acomp->ops = &amdgpu_dm_audio_component_ops;
923 	acomp->dev = kdev;
924 	adev->dm.audio_component = acomp;
925 
926 	return 0;
927 }
928 
929 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
930 					  struct device *hda_kdev, void *data)
931 {
932 	struct drm_device *dev = dev_get_drvdata(kdev);
933 	struct amdgpu_device *adev = drm_to_adev(dev);
934 	struct drm_audio_component *acomp = data;
935 
936 	acomp->ops = NULL;
937 	acomp->dev = NULL;
938 	adev->dm.audio_component = NULL;
939 }
940 
941 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
942 	.bind	= amdgpu_dm_audio_component_bind,
943 	.unbind	= amdgpu_dm_audio_component_unbind,
944 };
945 
946 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
947 {
948 	int i, ret;
949 
950 	if (!amdgpu_audio)
951 		return 0;
952 
953 	adev->mode_info.audio.enabled = true;
954 
955 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
956 
957 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
958 		adev->mode_info.audio.pin[i].channels = -1;
959 		adev->mode_info.audio.pin[i].rate = -1;
960 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
961 		adev->mode_info.audio.pin[i].status_bits = 0;
962 		adev->mode_info.audio.pin[i].category_code = 0;
963 		adev->mode_info.audio.pin[i].connected = false;
964 		adev->mode_info.audio.pin[i].id =
965 			adev->dm.dc->res_pool->audios[i]->inst;
966 		adev->mode_info.audio.pin[i].offset = 0;
967 	}
968 
969 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
970 	if (ret < 0)
971 		return ret;
972 
973 	adev->dm.audio_registered = true;
974 
975 	return 0;
976 }
977 
978 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
979 {
980 	if (!amdgpu_audio)
981 		return;
982 
983 	if (!adev->mode_info.audio.enabled)
984 		return;
985 
986 	if (adev->dm.audio_registered) {
987 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
988 		adev->dm.audio_registered = false;
989 	}
990 
991 	/* TODO: Disable audio? */
992 
993 	adev->mode_info.audio.enabled = false;
994 }
995 
996 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
997 {
998 	struct drm_audio_component *acomp = adev->dm.audio_component;
999 
1000 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1001 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1002 
1003 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1004 						 pin, -1);
1005 	}
1006 }
1007 
1008 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1009 {
1010 	const struct dmcub_firmware_header_v1_0 *hdr;
1011 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1012 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1013 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1014 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1015 	struct abm *abm = adev->dm.dc->res_pool->abm;
1016 	struct dmub_srv_hw_params hw_params;
1017 	enum dmub_status status;
1018 	const unsigned char *fw_inst_const, *fw_bss_data;
1019 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1020 	bool has_hw_support;
1021 
1022 	if (!dmub_srv)
1023 		/* DMUB isn't supported on the ASIC. */
1024 		return 0;
1025 
1026 	if (!fb_info) {
1027 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1028 		return -EINVAL;
1029 	}
1030 
1031 	if (!dmub_fw) {
1032 		/* Firmware required for DMUB support. */
1033 		DRM_ERROR("No firmware provided for DMUB.\n");
1034 		return -EINVAL;
1035 	}
1036 
1037 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1038 	if (status != DMUB_STATUS_OK) {
1039 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1040 		return -EINVAL;
1041 	}
1042 
1043 	if (!has_hw_support) {
1044 		DRM_INFO("DMUB unsupported on ASIC\n");
1045 		return 0;
1046 	}
1047 
1048 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1049 	status = dmub_srv_hw_reset(dmub_srv);
1050 	if (status != DMUB_STATUS_OK)
1051 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1052 
1053 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1054 
1055 	fw_inst_const = dmub_fw->data +
1056 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1057 			PSP_HEADER_BYTES;
1058 
1059 	fw_bss_data = dmub_fw->data +
1060 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1061 		      le32_to_cpu(hdr->inst_const_bytes);
1062 
1063 	/* Copy firmware and bios info into FB memory. */
1064 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1065 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1066 
1067 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1068 
1069 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1070 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1071 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1072 	 * will be done by dm_dmub_hw_init
1073 	 */
1074 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1075 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1076 				fw_inst_const_size);
1077 	}
1078 
1079 	if (fw_bss_data_size)
1080 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1081 		       fw_bss_data, fw_bss_data_size);
1082 
1083 	/* Copy firmware bios info into FB memory. */
1084 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1085 	       adev->bios_size);
1086 
1087 	/* Reset regions that need to be reset. */
1088 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1089 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1090 
1091 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1092 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1093 
1094 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1095 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1096 
1097 	/* Initialize hardware. */
1098 	memset(&hw_params, 0, sizeof(hw_params));
1099 	hw_params.fb_base = adev->gmc.fb_start;
1100 	hw_params.fb_offset = adev->gmc.aper_base;
1101 
1102 	/* backdoor load firmware and trigger dmub running */
1103 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1104 		hw_params.load_inst_const = true;
1105 
1106 	if (dmcu)
1107 		hw_params.psp_version = dmcu->psp_version;
1108 
1109 	for (i = 0; i < fb_info->num_fb; ++i)
1110 		hw_params.fb[i] = &fb_info->fb[i];
1111 
1112 	switch (adev->ip_versions[DCE_HWIP][0]) {
1113 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1114 		hw_params.dpia_supported = true;
1115 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1116 		break;
1117 	default:
1118 		break;
1119 	}
1120 
1121 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1122 	if (status != DMUB_STATUS_OK) {
1123 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1124 		return -EINVAL;
1125 	}
1126 
1127 	/* Wait for firmware load to finish. */
1128 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1129 	if (status != DMUB_STATUS_OK)
1130 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1131 
1132 	/* Init DMCU and ABM if available. */
1133 	if (dmcu && abm) {
1134 		dmcu->funcs->dmcu_init(dmcu);
1135 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1136 	}
1137 
1138 	if (!adev->dm.dc->ctx->dmub_srv)
1139 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1140 	if (!adev->dm.dc->ctx->dmub_srv) {
1141 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1142 		return -ENOMEM;
1143 	}
1144 
1145 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1146 		 adev->dm.dmcub_fw_version);
1147 
1148 	return 0;
1149 }
1150 
1151 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1152 {
1153 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1154 	enum dmub_status status;
1155 	bool init;
1156 
1157 	if (!dmub_srv) {
1158 		/* DMUB isn't supported on the ASIC. */
1159 		return;
1160 	}
1161 
1162 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1163 	if (status != DMUB_STATUS_OK)
1164 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1165 
1166 	if (status == DMUB_STATUS_OK && init) {
1167 		/* Wait for firmware load to finish. */
1168 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1169 		if (status != DMUB_STATUS_OK)
1170 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1171 	} else {
1172 		/* Perform the full hardware initialization. */
1173 		dm_dmub_hw_init(adev);
1174 	}
1175 }
1176 
1177 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1178 {
1179 	uint64_t pt_base;
1180 	uint32_t logical_addr_low;
1181 	uint32_t logical_addr_high;
1182 	uint32_t agp_base, agp_bot, agp_top;
1183 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1184 
1185 	memset(pa_config, 0, sizeof(*pa_config));
1186 
1187 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1188 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1189 
1190 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1191 		/*
1192 		 * Raven2 has a HW issue that it is unable to use the vram which
1193 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1194 		 * workaround that increase system aperture high address (add 1)
1195 		 * to get rid of the VM fault and hardware hang.
1196 		 */
1197 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1198 	else
1199 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1200 
1201 	agp_base = 0;
1202 	agp_bot = adev->gmc.agp_start >> 24;
1203 	agp_top = adev->gmc.agp_end >> 24;
1204 
1205 
1206 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1207 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1208 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1209 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1210 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1211 	page_table_base.low_part = lower_32_bits(pt_base);
1212 
1213 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1214 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1215 
1216 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1217 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1218 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1219 
1220 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1221 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1222 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1223 
1224 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1225 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1226 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1227 
1228 	pa_config->is_hvm_enabled = 0;
1229 
1230 }
1231 
1232 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1233 {
1234 	struct hpd_rx_irq_offload_work *offload_work;
1235 	struct amdgpu_dm_connector *aconnector;
1236 	struct dc_link *dc_link;
1237 	struct amdgpu_device *adev;
1238 	enum dc_connection_type new_connection_type = dc_connection_none;
1239 	unsigned long flags;
1240 
1241 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1242 	aconnector = offload_work->offload_wq->aconnector;
1243 
1244 	if (!aconnector) {
1245 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1246 		goto skip;
1247 	}
1248 
1249 	adev = drm_to_adev(aconnector->base.dev);
1250 	dc_link = aconnector->dc_link;
1251 
1252 	mutex_lock(&aconnector->hpd_lock);
1253 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1254 		DRM_ERROR("KMS: Failed to detect connector\n");
1255 	mutex_unlock(&aconnector->hpd_lock);
1256 
1257 	if (new_connection_type == dc_connection_none)
1258 		goto skip;
1259 
1260 	if (amdgpu_in_reset(adev))
1261 		goto skip;
1262 
1263 	mutex_lock(&adev->dm.dc_lock);
1264 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1265 		dc_link_dp_handle_automated_test(dc_link);
1266 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1267 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1268 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1269 		dc_link_dp_handle_link_loss(dc_link);
1270 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1271 		offload_work->offload_wq->is_handling_link_loss = false;
1272 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1273 	}
1274 	mutex_unlock(&adev->dm.dc_lock);
1275 
1276 skip:
1277 	kfree(offload_work);
1278 
1279 }
1280 
1281 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1282 {
1283 	int max_caps = dc->caps.max_links;
1284 	int i = 0;
1285 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1286 
1287 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1288 
1289 	if (!hpd_rx_offload_wq)
1290 		return NULL;
1291 
1292 
1293 	for (i = 0; i < max_caps; i++) {
1294 		hpd_rx_offload_wq[i].wq =
1295 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1296 
1297 		if (hpd_rx_offload_wq[i].wq == NULL) {
1298 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1299 			return NULL;
1300 		}
1301 
1302 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1303 	}
1304 
1305 	return hpd_rx_offload_wq;
1306 }
1307 
1308 struct amdgpu_stutter_quirk {
1309 	u16 chip_vendor;
1310 	u16 chip_device;
1311 	u16 subsys_vendor;
1312 	u16 subsys_device;
1313 	u8 revision;
1314 };
1315 
1316 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1317 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1318 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1319 	{ 0, 0, 0, 0, 0 },
1320 };
1321 
1322 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1323 {
1324 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1325 
1326 	while (p && p->chip_device != 0) {
1327 		if (pdev->vendor == p->chip_vendor &&
1328 		    pdev->device == p->chip_device &&
1329 		    pdev->subsystem_vendor == p->subsys_vendor &&
1330 		    pdev->subsystem_device == p->subsys_device &&
1331 		    pdev->revision == p->revision) {
1332 			return true;
1333 		}
1334 		++p;
1335 	}
1336 	return false;
1337 }
1338 
1339 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1340 	{
1341 		.matches = {
1342 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1343 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1344 		},
1345 	},
1346 	{
1347 		.matches = {
1348 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1349 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1350 		},
1351 	},
1352 	{
1353 		.matches = {
1354 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1355 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1356 		},
1357 	},
1358 	{}
1359 };
1360 
1361 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1362 {
1363 	const struct dmi_system_id *dmi_id;
1364 
1365 	dm->aux_hpd_discon_quirk = false;
1366 
1367 	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1368 	if (dmi_id) {
1369 		dm->aux_hpd_discon_quirk = true;
1370 		DRM_INFO("aux_hpd_discon_quirk attached\n");
1371 	}
1372 }
1373 
1374 static int amdgpu_dm_init(struct amdgpu_device *adev)
1375 {
1376 	struct dc_init_data init_data;
1377 #ifdef CONFIG_DRM_AMD_DC_HDCP
1378 	struct dc_callback_init init_params;
1379 #endif
1380 	int r;
1381 
1382 	adev->dm.ddev = adev_to_drm(adev);
1383 	adev->dm.adev = adev;
1384 
1385 	/* Zero all the fields */
1386 	memset(&init_data, 0, sizeof(init_data));
1387 #ifdef CONFIG_DRM_AMD_DC_HDCP
1388 	memset(&init_params, 0, sizeof(init_params));
1389 #endif
1390 
1391 	mutex_init(&adev->dm.dc_lock);
1392 	mutex_init(&adev->dm.audio_lock);
1393 	spin_lock_init(&adev->dm.vblank_lock);
1394 
1395 	if(amdgpu_dm_irq_init(adev)) {
1396 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1397 		goto error;
1398 	}
1399 
1400 	init_data.asic_id.chip_family = adev->family;
1401 
1402 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1403 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1404 	init_data.asic_id.chip_id = adev->pdev->device;
1405 
1406 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1407 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1408 	init_data.asic_id.atombios_base_address =
1409 		adev->mode_info.atom_context->bios;
1410 
1411 	init_data.driver = adev;
1412 
1413 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1414 
1415 	if (!adev->dm.cgs_device) {
1416 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1417 		goto error;
1418 	}
1419 
1420 	init_data.cgs_device = adev->dm.cgs_device;
1421 
1422 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1423 
1424 	switch (adev->ip_versions[DCE_HWIP][0]) {
1425 	case IP_VERSION(2, 1, 0):
1426 		switch (adev->dm.dmcub_fw_version) {
1427 		case 0: /* development */
1428 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1429 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1430 			init_data.flags.disable_dmcu = false;
1431 			break;
1432 		default:
1433 			init_data.flags.disable_dmcu = true;
1434 		}
1435 		break;
1436 	case IP_VERSION(2, 0, 3):
1437 		init_data.flags.disable_dmcu = true;
1438 		break;
1439 	default:
1440 		break;
1441 	}
1442 
1443 	switch (adev->asic_type) {
1444 	case CHIP_CARRIZO:
1445 	case CHIP_STONEY:
1446 		init_data.flags.gpu_vm_support = true;
1447 		break;
1448 	default:
1449 		switch (adev->ip_versions[DCE_HWIP][0]) {
1450 		case IP_VERSION(1, 0, 0):
1451 		case IP_VERSION(1, 0, 1):
1452 			/* enable S/G on PCO and RV2 */
1453 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1454 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1455 				init_data.flags.gpu_vm_support = true;
1456 			break;
1457 		case IP_VERSION(2, 1, 0):
1458 		case IP_VERSION(3, 0, 1):
1459 		case IP_VERSION(3, 1, 2):
1460 		case IP_VERSION(3, 1, 3):
1461 		case IP_VERSION(3, 1, 5):
1462 		case IP_VERSION(3, 1, 6):
1463 			init_data.flags.gpu_vm_support = true;
1464 			break;
1465 		default:
1466 			break;
1467 		}
1468 		break;
1469 	}
1470 
1471 	if (init_data.flags.gpu_vm_support)
1472 		adev->mode_info.gpu_vm_support = true;
1473 
1474 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1475 		init_data.flags.fbc_support = true;
1476 
1477 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1478 		init_data.flags.multi_mon_pp_mclk_switch = true;
1479 
1480 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1481 		init_data.flags.disable_fractional_pwm = true;
1482 
1483 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1484 		init_data.flags.edp_no_power_sequencing = true;
1485 
1486 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1487 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1488 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1489 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1490 
1491 	init_data.flags.seamless_boot_edp_requested = false;
1492 
1493 	if (check_seamless_boot_capability(adev)) {
1494 		init_data.flags.seamless_boot_edp_requested = true;
1495 		init_data.flags.allow_seamless_boot_optimization = true;
1496 		DRM_INFO("Seamless boot condition check passed\n");
1497 	}
1498 
1499 	init_data.flags.enable_mipi_converter_optimization = true;
1500 
1501 	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1502 	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1503 
1504 	INIT_LIST_HEAD(&adev->dm.da_list);
1505 
1506 	retrieve_dmi_info(&adev->dm);
1507 
1508 	/* Display Core create. */
1509 	adev->dm.dc = dc_create(&init_data);
1510 
1511 	if (adev->dm.dc) {
1512 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1513 	} else {
1514 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1515 		goto error;
1516 	}
1517 
1518 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1519 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1520 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1521 	}
1522 
1523 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1524 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1525 	if (dm_should_disable_stutter(adev->pdev))
1526 		adev->dm.dc->debug.disable_stutter = true;
1527 
1528 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1529 		adev->dm.dc->debug.disable_stutter = true;
1530 
1531 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1532 		adev->dm.dc->debug.disable_dsc = true;
1533 		adev->dm.dc->debug.disable_dsc_edp = true;
1534 	}
1535 
1536 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1537 		adev->dm.dc->debug.disable_clock_gate = true;
1538 
1539 	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1540 		adev->dm.dc->debug.force_subvp_mclk_switch = true;
1541 
1542 	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
1543 
1544 	r = dm_dmub_hw_init(adev);
1545 	if (r) {
1546 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1547 		goto error;
1548 	}
1549 
1550 	dc_hardware_init(adev->dm.dc);
1551 
1552 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1553 	if (!adev->dm.hpd_rx_offload_wq) {
1554 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1555 		goto error;
1556 	}
1557 
1558 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1559 		struct dc_phy_addr_space_config pa_config;
1560 
1561 		mmhub_read_system_context(adev, &pa_config);
1562 
1563 		// Call the DC init_memory func
1564 		dc_setup_system_context(adev->dm.dc, &pa_config);
1565 	}
1566 
1567 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1568 	if (!adev->dm.freesync_module) {
1569 		DRM_ERROR(
1570 		"amdgpu: failed to initialize freesync_module.\n");
1571 	} else
1572 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1573 				adev->dm.freesync_module);
1574 
1575 	amdgpu_dm_init_color_mod();
1576 
1577 	if (adev->dm.dc->caps.max_links > 0) {
1578 		adev->dm.vblank_control_workqueue =
1579 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1580 		if (!adev->dm.vblank_control_workqueue)
1581 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1582 	}
1583 
1584 #ifdef CONFIG_DRM_AMD_DC_HDCP
1585 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1586 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1587 
1588 		if (!adev->dm.hdcp_workqueue)
1589 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1590 		else
1591 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1592 
1593 		dc_init_callbacks(adev->dm.dc, &init_params);
1594 	}
1595 #endif
1596 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1597 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1598 #endif
1599 	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1600 		init_completion(&adev->dm.dmub_aux_transfer_done);
1601 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1602 		if (!adev->dm.dmub_notify) {
1603 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1604 			goto error;
1605 		}
1606 
1607 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1608 		if (!adev->dm.delayed_hpd_wq) {
1609 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1610 			goto error;
1611 		}
1612 
1613 		amdgpu_dm_outbox_init(adev);
1614 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1615 			dmub_aux_setconfig_callback, false)) {
1616 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
1617 			goto error;
1618 		}
1619 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1620 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1621 			goto error;
1622 		}
1623 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1624 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1625 			goto error;
1626 		}
1627 	}
1628 
1629 	if (amdgpu_dm_initialize_drm_device(adev)) {
1630 		DRM_ERROR(
1631 		"amdgpu: failed to initialize sw for display support.\n");
1632 		goto error;
1633 	}
1634 
1635 	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1636 	 * It is expected that DMUB will resend any pending notifications at this point, for
1637 	 * example HPD from DPIA.
1638 	 */
1639 	if (dc_is_dmub_outbox_supported(adev->dm.dc))
1640 		dc_enable_dmub_outbox(adev->dm.dc);
1641 
1642 	/* create fake encoders for MST */
1643 	dm_dp_create_fake_mst_encoders(adev);
1644 
1645 	/* TODO: Add_display_info? */
1646 
1647 	/* TODO use dynamic cursor width */
1648 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1649 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1650 
1651 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1652 		DRM_ERROR(
1653 		"amdgpu: failed to initialize sw for display support.\n");
1654 		goto error;
1655 	}
1656 
1657 
1658 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1659 
1660 	return 0;
1661 error:
1662 	amdgpu_dm_fini(adev);
1663 
1664 	return -EINVAL;
1665 }
1666 
1667 static int amdgpu_dm_early_fini(void *handle)
1668 {
1669 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1670 
1671 	amdgpu_dm_audio_fini(adev);
1672 
1673 	return 0;
1674 }
1675 
1676 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1677 {
1678 	int i;
1679 
1680 	if (adev->dm.vblank_control_workqueue) {
1681 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1682 		adev->dm.vblank_control_workqueue = NULL;
1683 	}
1684 
1685 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1686 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1687 	}
1688 
1689 	amdgpu_dm_destroy_drm_device(&adev->dm);
1690 
1691 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1692 	if (adev->dm.crc_rd_wrk) {
1693 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1694 		kfree(adev->dm.crc_rd_wrk);
1695 		adev->dm.crc_rd_wrk = NULL;
1696 	}
1697 #endif
1698 #ifdef CONFIG_DRM_AMD_DC_HDCP
1699 	if (adev->dm.hdcp_workqueue) {
1700 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1701 		adev->dm.hdcp_workqueue = NULL;
1702 	}
1703 
1704 	if (adev->dm.dc)
1705 		dc_deinit_callbacks(adev->dm.dc);
1706 #endif
1707 
1708 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1709 
1710 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1711 		kfree(adev->dm.dmub_notify);
1712 		adev->dm.dmub_notify = NULL;
1713 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1714 		adev->dm.delayed_hpd_wq = NULL;
1715 	}
1716 
1717 	if (adev->dm.dmub_bo)
1718 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1719 				      &adev->dm.dmub_bo_gpu_addr,
1720 				      &adev->dm.dmub_bo_cpu_addr);
1721 
1722 	if (adev->dm.hpd_rx_offload_wq) {
1723 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1724 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1725 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1726 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1727 			}
1728 		}
1729 
1730 		kfree(adev->dm.hpd_rx_offload_wq);
1731 		adev->dm.hpd_rx_offload_wq = NULL;
1732 	}
1733 
1734 	/* DC Destroy TODO: Replace destroy DAL */
1735 	if (adev->dm.dc)
1736 		dc_destroy(&adev->dm.dc);
1737 	/*
1738 	 * TODO: pageflip, vlank interrupt
1739 	 *
1740 	 * amdgpu_dm_irq_fini(adev);
1741 	 */
1742 
1743 	if (adev->dm.cgs_device) {
1744 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1745 		adev->dm.cgs_device = NULL;
1746 	}
1747 	if (adev->dm.freesync_module) {
1748 		mod_freesync_destroy(adev->dm.freesync_module);
1749 		adev->dm.freesync_module = NULL;
1750 	}
1751 
1752 	mutex_destroy(&adev->dm.audio_lock);
1753 	mutex_destroy(&adev->dm.dc_lock);
1754 
1755 	return;
1756 }
1757 
1758 static int load_dmcu_fw(struct amdgpu_device *adev)
1759 {
1760 	const char *fw_name_dmcu = NULL;
1761 	int r;
1762 	const struct dmcu_firmware_header_v1_0 *hdr;
1763 
1764 	switch(adev->asic_type) {
1765 #if defined(CONFIG_DRM_AMD_DC_SI)
1766 	case CHIP_TAHITI:
1767 	case CHIP_PITCAIRN:
1768 	case CHIP_VERDE:
1769 	case CHIP_OLAND:
1770 #endif
1771 	case CHIP_BONAIRE:
1772 	case CHIP_HAWAII:
1773 	case CHIP_KAVERI:
1774 	case CHIP_KABINI:
1775 	case CHIP_MULLINS:
1776 	case CHIP_TONGA:
1777 	case CHIP_FIJI:
1778 	case CHIP_CARRIZO:
1779 	case CHIP_STONEY:
1780 	case CHIP_POLARIS11:
1781 	case CHIP_POLARIS10:
1782 	case CHIP_POLARIS12:
1783 	case CHIP_VEGAM:
1784 	case CHIP_VEGA10:
1785 	case CHIP_VEGA12:
1786 	case CHIP_VEGA20:
1787 		return 0;
1788 	case CHIP_NAVI12:
1789 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1790 		break;
1791 	case CHIP_RAVEN:
1792 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1793 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1794 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1795 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1796 		else
1797 			return 0;
1798 		break;
1799 	default:
1800 		switch (adev->ip_versions[DCE_HWIP][0]) {
1801 		case IP_VERSION(2, 0, 2):
1802 		case IP_VERSION(2, 0, 3):
1803 		case IP_VERSION(2, 0, 0):
1804 		case IP_VERSION(2, 1, 0):
1805 		case IP_VERSION(3, 0, 0):
1806 		case IP_VERSION(3, 0, 2):
1807 		case IP_VERSION(3, 0, 3):
1808 		case IP_VERSION(3, 0, 1):
1809 		case IP_VERSION(3, 1, 2):
1810 		case IP_VERSION(3, 1, 3):
1811 		case IP_VERSION(3, 1, 4):
1812 		case IP_VERSION(3, 1, 5):
1813 		case IP_VERSION(3, 1, 6):
1814 		case IP_VERSION(3, 2, 0):
1815 		case IP_VERSION(3, 2, 1):
1816 			return 0;
1817 		default:
1818 			break;
1819 		}
1820 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1821 		return -EINVAL;
1822 	}
1823 
1824 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1825 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1826 		return 0;
1827 	}
1828 
1829 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1830 	if (r == -ENOENT) {
1831 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1832 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1833 		adev->dm.fw_dmcu = NULL;
1834 		return 0;
1835 	}
1836 	if (r) {
1837 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1838 			fw_name_dmcu);
1839 		return r;
1840 	}
1841 
1842 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1843 	if (r) {
1844 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1845 			fw_name_dmcu);
1846 		release_firmware(adev->dm.fw_dmcu);
1847 		adev->dm.fw_dmcu = NULL;
1848 		return r;
1849 	}
1850 
1851 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1852 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1853 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1854 	adev->firmware.fw_size +=
1855 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1856 
1857 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1858 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1859 	adev->firmware.fw_size +=
1860 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1861 
1862 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1863 
1864 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1865 
1866 	return 0;
1867 }
1868 
1869 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1870 {
1871 	struct amdgpu_device *adev = ctx;
1872 
1873 	return dm_read_reg(adev->dm.dc->ctx, address);
1874 }
1875 
1876 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1877 				     uint32_t value)
1878 {
1879 	struct amdgpu_device *adev = ctx;
1880 
1881 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1882 }
1883 
1884 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1885 {
1886 	struct dmub_srv_create_params create_params;
1887 	struct dmub_srv_region_params region_params;
1888 	struct dmub_srv_region_info region_info;
1889 	struct dmub_srv_fb_params fb_params;
1890 	struct dmub_srv_fb_info *fb_info;
1891 	struct dmub_srv *dmub_srv;
1892 	const struct dmcub_firmware_header_v1_0 *hdr;
1893 	const char *fw_name_dmub;
1894 	enum dmub_asic dmub_asic;
1895 	enum dmub_status status;
1896 	int r;
1897 
1898 	switch (adev->ip_versions[DCE_HWIP][0]) {
1899 	case IP_VERSION(2, 1, 0):
1900 		dmub_asic = DMUB_ASIC_DCN21;
1901 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1902 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1903 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1904 		break;
1905 	case IP_VERSION(3, 0, 0):
1906 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1907 			dmub_asic = DMUB_ASIC_DCN30;
1908 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1909 		} else {
1910 			dmub_asic = DMUB_ASIC_DCN30;
1911 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1912 		}
1913 		break;
1914 	case IP_VERSION(3, 0, 1):
1915 		dmub_asic = DMUB_ASIC_DCN301;
1916 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1917 		break;
1918 	case IP_VERSION(3, 0, 2):
1919 		dmub_asic = DMUB_ASIC_DCN302;
1920 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1921 		break;
1922 	case IP_VERSION(3, 0, 3):
1923 		dmub_asic = DMUB_ASIC_DCN303;
1924 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1925 		break;
1926 	case IP_VERSION(3, 1, 2):
1927 	case IP_VERSION(3, 1, 3):
1928 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1929 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1930 		break;
1931 	case IP_VERSION(3, 1, 4):
1932 		dmub_asic = DMUB_ASIC_DCN314;
1933 		fw_name_dmub = FIRMWARE_DCN_314_DMUB;
1934 		break;
1935 	case IP_VERSION(3, 1, 5):
1936 		dmub_asic = DMUB_ASIC_DCN315;
1937 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1938 		break;
1939 	case IP_VERSION(3, 1, 6):
1940 		dmub_asic = DMUB_ASIC_DCN316;
1941 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1942 		break;
1943 	case IP_VERSION(3, 2, 0):
1944 		dmub_asic = DMUB_ASIC_DCN32;
1945 		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
1946 		break;
1947 	case IP_VERSION(3, 2, 1):
1948 		dmub_asic = DMUB_ASIC_DCN321;
1949 		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
1950 		break;
1951 	default:
1952 		/* ASIC doesn't support DMUB. */
1953 		return 0;
1954 	}
1955 
1956 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1957 	if (r) {
1958 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1959 		return 0;
1960 	}
1961 
1962 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1963 	if (r) {
1964 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1965 		return 0;
1966 	}
1967 
1968 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1969 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1970 
1971 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1972 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1973 			AMDGPU_UCODE_ID_DMCUB;
1974 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1975 			adev->dm.dmub_fw;
1976 		adev->firmware.fw_size +=
1977 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1978 
1979 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1980 			 adev->dm.dmcub_fw_version);
1981 	}
1982 
1983 
1984 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1985 	dmub_srv = adev->dm.dmub_srv;
1986 
1987 	if (!dmub_srv) {
1988 		DRM_ERROR("Failed to allocate DMUB service!\n");
1989 		return -ENOMEM;
1990 	}
1991 
1992 	memset(&create_params, 0, sizeof(create_params));
1993 	create_params.user_ctx = adev;
1994 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1995 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1996 	create_params.asic = dmub_asic;
1997 
1998 	/* Create the DMUB service. */
1999 	status = dmub_srv_create(dmub_srv, &create_params);
2000 	if (status != DMUB_STATUS_OK) {
2001 		DRM_ERROR("Error creating DMUB service: %d\n", status);
2002 		return -EINVAL;
2003 	}
2004 
2005 	/* Calculate the size of all the regions for the DMUB service. */
2006 	memset(&region_params, 0, sizeof(region_params));
2007 
2008 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2009 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2010 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2011 	region_params.vbios_size = adev->bios_size;
2012 	region_params.fw_bss_data = region_params.bss_data_size ?
2013 		adev->dm.dmub_fw->data +
2014 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2015 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2016 	region_params.fw_inst_const =
2017 		adev->dm.dmub_fw->data +
2018 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2019 		PSP_HEADER_BYTES;
2020 
2021 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2022 					   &region_info);
2023 
2024 	if (status != DMUB_STATUS_OK) {
2025 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2026 		return -EINVAL;
2027 	}
2028 
2029 	/*
2030 	 * Allocate a framebuffer based on the total size of all the regions.
2031 	 * TODO: Move this into GART.
2032 	 */
2033 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2034 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2035 				    &adev->dm.dmub_bo_gpu_addr,
2036 				    &adev->dm.dmub_bo_cpu_addr);
2037 	if (r)
2038 		return r;
2039 
2040 	/* Rebase the regions on the framebuffer address. */
2041 	memset(&fb_params, 0, sizeof(fb_params));
2042 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2043 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2044 	fb_params.region_info = &region_info;
2045 
2046 	adev->dm.dmub_fb_info =
2047 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2048 	fb_info = adev->dm.dmub_fb_info;
2049 
2050 	if (!fb_info) {
2051 		DRM_ERROR(
2052 			"Failed to allocate framebuffer info for DMUB service!\n");
2053 		return -ENOMEM;
2054 	}
2055 
2056 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2057 	if (status != DMUB_STATUS_OK) {
2058 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2059 		return -EINVAL;
2060 	}
2061 
2062 	return 0;
2063 }
2064 
2065 static int dm_sw_init(void *handle)
2066 {
2067 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2068 	int r;
2069 
2070 	r = dm_dmub_sw_init(adev);
2071 	if (r)
2072 		return r;
2073 
2074 	return load_dmcu_fw(adev);
2075 }
2076 
2077 static int dm_sw_fini(void *handle)
2078 {
2079 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2080 
2081 	kfree(adev->dm.dmub_fb_info);
2082 	adev->dm.dmub_fb_info = NULL;
2083 
2084 	if (adev->dm.dmub_srv) {
2085 		dmub_srv_destroy(adev->dm.dmub_srv);
2086 		adev->dm.dmub_srv = NULL;
2087 	}
2088 
2089 	release_firmware(adev->dm.dmub_fw);
2090 	adev->dm.dmub_fw = NULL;
2091 
2092 	release_firmware(adev->dm.fw_dmcu);
2093 	adev->dm.fw_dmcu = NULL;
2094 
2095 	return 0;
2096 }
2097 
2098 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2099 {
2100 	struct amdgpu_dm_connector *aconnector;
2101 	struct drm_connector *connector;
2102 	struct drm_connector_list_iter iter;
2103 	int ret = 0;
2104 
2105 	drm_connector_list_iter_begin(dev, &iter);
2106 	drm_for_each_connector_iter(connector, &iter) {
2107 		aconnector = to_amdgpu_dm_connector(connector);
2108 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2109 		    aconnector->mst_mgr.aux) {
2110 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2111 					 aconnector,
2112 					 aconnector->base.base.id);
2113 
2114 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2115 			if (ret < 0) {
2116 				DRM_ERROR("DM_MST: Failed to start MST\n");
2117 				aconnector->dc_link->type =
2118 					dc_connection_single;
2119 				break;
2120 			}
2121 		}
2122 	}
2123 	drm_connector_list_iter_end(&iter);
2124 
2125 	return ret;
2126 }
2127 
2128 static int dm_late_init(void *handle)
2129 {
2130 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2131 
2132 	struct dmcu_iram_parameters params;
2133 	unsigned int linear_lut[16];
2134 	int i;
2135 	struct dmcu *dmcu = NULL;
2136 
2137 	dmcu = adev->dm.dc->res_pool->dmcu;
2138 
2139 	for (i = 0; i < 16; i++)
2140 		linear_lut[i] = 0xFFFF * i / 15;
2141 
2142 	params.set = 0;
2143 	params.backlight_ramping_override = false;
2144 	params.backlight_ramping_start = 0xCCCC;
2145 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2146 	params.backlight_lut_array_size = 16;
2147 	params.backlight_lut_array = linear_lut;
2148 
2149 	/* Min backlight level after ABM reduction,  Don't allow below 1%
2150 	 * 0xFFFF x 0.01 = 0x28F
2151 	 */
2152 	params.min_abm_backlight = 0x28F;
2153 	/* In the case where abm is implemented on dmcub,
2154 	* dmcu object will be null.
2155 	* ABM 2.4 and up are implemented on dmcub.
2156 	*/
2157 	if (dmcu) {
2158 		if (!dmcu_load_iram(dmcu, params))
2159 			return -EINVAL;
2160 	} else if (adev->dm.dc->ctx->dmub_srv) {
2161 		struct dc_link *edp_links[MAX_NUM_EDP];
2162 		int edp_num;
2163 
2164 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2165 		for (i = 0; i < edp_num; i++) {
2166 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2167 				return -EINVAL;
2168 		}
2169 	}
2170 
2171 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2172 }
2173 
2174 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2175 {
2176 	struct amdgpu_dm_connector *aconnector;
2177 	struct drm_connector *connector;
2178 	struct drm_connector_list_iter iter;
2179 	struct drm_dp_mst_topology_mgr *mgr;
2180 	int ret;
2181 	bool need_hotplug = false;
2182 
2183 	drm_connector_list_iter_begin(dev, &iter);
2184 	drm_for_each_connector_iter(connector, &iter) {
2185 		aconnector = to_amdgpu_dm_connector(connector);
2186 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2187 		    aconnector->mst_port)
2188 			continue;
2189 
2190 		mgr = &aconnector->mst_mgr;
2191 
2192 		if (suspend) {
2193 			drm_dp_mst_topology_mgr_suspend(mgr);
2194 		} else {
2195 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2196 			if (ret < 0) {
2197 				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2198 					aconnector->dc_link);
2199 				need_hotplug = true;
2200 			}
2201 		}
2202 	}
2203 	drm_connector_list_iter_end(&iter);
2204 
2205 	if (need_hotplug)
2206 		drm_kms_helper_hotplug_event(dev);
2207 }
2208 
2209 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2210 {
2211 	int ret = 0;
2212 
2213 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
2214 	 * on window driver dc implementation.
2215 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2216 	 * should be passed to smu during boot up and resume from s3.
2217 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
2218 	 * dcn20_resource_construct
2219 	 * then call pplib functions below to pass the settings to smu:
2220 	 * smu_set_watermarks_for_clock_ranges
2221 	 * smu_set_watermarks_table
2222 	 * navi10_set_watermarks_table
2223 	 * smu_write_watermarks_table
2224 	 *
2225 	 * For Renoir, clock settings of dcn watermark are also fixed values.
2226 	 * dc has implemented different flow for window driver:
2227 	 * dc_hardware_init / dc_set_power_state
2228 	 * dcn10_init_hw
2229 	 * notify_wm_ranges
2230 	 * set_wm_ranges
2231 	 * -- Linux
2232 	 * smu_set_watermarks_for_clock_ranges
2233 	 * renoir_set_watermarks_table
2234 	 * smu_write_watermarks_table
2235 	 *
2236 	 * For Linux,
2237 	 * dc_hardware_init -> amdgpu_dm_init
2238 	 * dc_set_power_state --> dm_resume
2239 	 *
2240 	 * therefore, this function apply to navi10/12/14 but not Renoir
2241 	 * *
2242 	 */
2243 	switch (adev->ip_versions[DCE_HWIP][0]) {
2244 	case IP_VERSION(2, 0, 2):
2245 	case IP_VERSION(2, 0, 0):
2246 		break;
2247 	default:
2248 		return 0;
2249 	}
2250 
2251 	ret = amdgpu_dpm_write_watermarks_table(adev);
2252 	if (ret) {
2253 		DRM_ERROR("Failed to update WMTABLE!\n");
2254 		return ret;
2255 	}
2256 
2257 	return 0;
2258 }
2259 
2260 /**
2261  * dm_hw_init() - Initialize DC device
2262  * @handle: The base driver device containing the amdgpu_dm device.
2263  *
2264  * Initialize the &struct amdgpu_display_manager device. This involves calling
2265  * the initializers of each DM component, then populating the struct with them.
2266  *
2267  * Although the function implies hardware initialization, both hardware and
2268  * software are initialized here. Splitting them out to their relevant init
2269  * hooks is a future TODO item.
2270  *
2271  * Some notable things that are initialized here:
2272  *
2273  * - Display Core, both software and hardware
2274  * - DC modules that we need (freesync and color management)
2275  * - DRM software states
2276  * - Interrupt sources and handlers
2277  * - Vblank support
2278  * - Debug FS entries, if enabled
2279  */
2280 static int dm_hw_init(void *handle)
2281 {
2282 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2283 	/* Create DAL display manager */
2284 	amdgpu_dm_init(adev);
2285 	amdgpu_dm_hpd_init(adev);
2286 
2287 	return 0;
2288 }
2289 
2290 /**
2291  * dm_hw_fini() - Teardown DC device
2292  * @handle: The base driver device containing the amdgpu_dm device.
2293  *
2294  * Teardown components within &struct amdgpu_display_manager that require
2295  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2296  * were loaded. Also flush IRQ workqueues and disable them.
2297  */
2298 static int dm_hw_fini(void *handle)
2299 {
2300 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2301 
2302 	amdgpu_dm_hpd_fini(adev);
2303 
2304 	amdgpu_dm_irq_fini(adev);
2305 	amdgpu_dm_fini(adev);
2306 	return 0;
2307 }
2308 
2309 
2310 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2311 				 struct dc_state *state, bool enable)
2312 {
2313 	enum dc_irq_source irq_source;
2314 	struct amdgpu_crtc *acrtc;
2315 	int rc = -EBUSY;
2316 	int i = 0;
2317 
2318 	for (i = 0; i < state->stream_count; i++) {
2319 		acrtc = get_crtc_by_otg_inst(
2320 				adev, state->stream_status[i].primary_otg_inst);
2321 
2322 		if (acrtc && state->stream_status[i].plane_count != 0) {
2323 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2324 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2325 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2326 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2327 			if (rc)
2328 				DRM_WARN("Failed to %s pflip interrupts\n",
2329 					 enable ? "enable" : "disable");
2330 
2331 			if (enable) {
2332 				rc = dm_enable_vblank(&acrtc->base);
2333 				if (rc)
2334 					DRM_WARN("Failed to enable vblank interrupts\n");
2335 			} else {
2336 				dm_disable_vblank(&acrtc->base);
2337 			}
2338 
2339 		}
2340 	}
2341 
2342 }
2343 
2344 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2345 {
2346 	struct dc_state *context = NULL;
2347 	enum dc_status res = DC_ERROR_UNEXPECTED;
2348 	int i;
2349 	struct dc_stream_state *del_streams[MAX_PIPES];
2350 	int del_streams_count = 0;
2351 
2352 	memset(del_streams, 0, sizeof(del_streams));
2353 
2354 	context = dc_create_state(dc);
2355 	if (context == NULL)
2356 		goto context_alloc_fail;
2357 
2358 	dc_resource_state_copy_construct_current(dc, context);
2359 
2360 	/* First remove from context all streams */
2361 	for (i = 0; i < context->stream_count; i++) {
2362 		struct dc_stream_state *stream = context->streams[i];
2363 
2364 		del_streams[del_streams_count++] = stream;
2365 	}
2366 
2367 	/* Remove all planes for removed streams and then remove the streams */
2368 	for (i = 0; i < del_streams_count; i++) {
2369 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2370 			res = DC_FAIL_DETACH_SURFACES;
2371 			goto fail;
2372 		}
2373 
2374 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2375 		if (res != DC_OK)
2376 			goto fail;
2377 	}
2378 
2379 	res = dc_commit_state(dc, context);
2380 
2381 fail:
2382 	dc_release_state(context);
2383 
2384 context_alloc_fail:
2385 	return res;
2386 }
2387 
2388 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2389 {
2390 	int i;
2391 
2392 	if (dm->hpd_rx_offload_wq) {
2393 		for (i = 0; i < dm->dc->caps.max_links; i++)
2394 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2395 	}
2396 }
2397 
2398 static int dm_suspend(void *handle)
2399 {
2400 	struct amdgpu_device *adev = handle;
2401 	struct amdgpu_display_manager *dm = &adev->dm;
2402 	int ret = 0;
2403 
2404 	if (amdgpu_in_reset(adev)) {
2405 		mutex_lock(&dm->dc_lock);
2406 
2407 		dc_allow_idle_optimizations(adev->dm.dc, false);
2408 
2409 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2410 
2411 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2412 
2413 		amdgpu_dm_commit_zero_streams(dm->dc);
2414 
2415 		amdgpu_dm_irq_suspend(adev);
2416 
2417 		hpd_rx_irq_work_suspend(dm);
2418 
2419 		return ret;
2420 	}
2421 
2422 	WARN_ON(adev->dm.cached_state);
2423 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2424 
2425 	s3_handle_mst(adev_to_drm(adev), true);
2426 
2427 	amdgpu_dm_irq_suspend(adev);
2428 
2429 	hpd_rx_irq_work_suspend(dm);
2430 
2431 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2432 
2433 	return 0;
2434 }
2435 
2436 struct amdgpu_dm_connector *
2437 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2438 					     struct drm_crtc *crtc)
2439 {
2440 	uint32_t i;
2441 	struct drm_connector_state *new_con_state;
2442 	struct drm_connector *connector;
2443 	struct drm_crtc *crtc_from_state;
2444 
2445 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2446 		crtc_from_state = new_con_state->crtc;
2447 
2448 		if (crtc_from_state == crtc)
2449 			return to_amdgpu_dm_connector(connector);
2450 	}
2451 
2452 	return NULL;
2453 }
2454 
2455 static void emulated_link_detect(struct dc_link *link)
2456 {
2457 	struct dc_sink_init_data sink_init_data = { 0 };
2458 	struct display_sink_capability sink_caps = { 0 };
2459 	enum dc_edid_status edid_status;
2460 	struct dc_context *dc_ctx = link->ctx;
2461 	struct dc_sink *sink = NULL;
2462 	struct dc_sink *prev_sink = NULL;
2463 
2464 	link->type = dc_connection_none;
2465 	prev_sink = link->local_sink;
2466 
2467 	if (prev_sink)
2468 		dc_sink_release(prev_sink);
2469 
2470 	switch (link->connector_signal) {
2471 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2472 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2473 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2474 		break;
2475 	}
2476 
2477 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2478 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2479 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2480 		break;
2481 	}
2482 
2483 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2484 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2485 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2486 		break;
2487 	}
2488 
2489 	case SIGNAL_TYPE_LVDS: {
2490 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2491 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2492 		break;
2493 	}
2494 
2495 	case SIGNAL_TYPE_EDP: {
2496 		sink_caps.transaction_type =
2497 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2498 		sink_caps.signal = SIGNAL_TYPE_EDP;
2499 		break;
2500 	}
2501 
2502 	case SIGNAL_TYPE_DISPLAY_PORT: {
2503 		sink_caps.transaction_type =
2504 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2505 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2506 		break;
2507 	}
2508 
2509 	default:
2510 		DC_ERROR("Invalid connector type! signal:%d\n",
2511 			link->connector_signal);
2512 		return;
2513 	}
2514 
2515 	sink_init_data.link = link;
2516 	sink_init_data.sink_signal = sink_caps.signal;
2517 
2518 	sink = dc_sink_create(&sink_init_data);
2519 	if (!sink) {
2520 		DC_ERROR("Failed to create sink!\n");
2521 		return;
2522 	}
2523 
2524 	/* dc_sink_create returns a new reference */
2525 	link->local_sink = sink;
2526 
2527 	edid_status = dm_helpers_read_local_edid(
2528 			link->ctx,
2529 			link,
2530 			sink);
2531 
2532 	if (edid_status != EDID_OK)
2533 		DC_ERROR("Failed to read EDID");
2534 
2535 }
2536 
2537 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2538 				     struct amdgpu_display_manager *dm)
2539 {
2540 	struct {
2541 		struct dc_surface_update surface_updates[MAX_SURFACES];
2542 		struct dc_plane_info plane_infos[MAX_SURFACES];
2543 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2544 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2545 		struct dc_stream_update stream_update;
2546 	} * bundle;
2547 	int k, m;
2548 
2549 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2550 
2551 	if (!bundle) {
2552 		dm_error("Failed to allocate update bundle\n");
2553 		goto cleanup;
2554 	}
2555 
2556 	for (k = 0; k < dc_state->stream_count; k++) {
2557 		bundle->stream_update.stream = dc_state->streams[k];
2558 
2559 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2560 			bundle->surface_updates[m].surface =
2561 				dc_state->stream_status->plane_states[m];
2562 			bundle->surface_updates[m].surface->force_full_update =
2563 				true;
2564 		}
2565 		dc_commit_updates_for_stream(
2566 			dm->dc, bundle->surface_updates,
2567 			dc_state->stream_status->plane_count,
2568 			dc_state->streams[k], &bundle->stream_update, dc_state);
2569 	}
2570 
2571 cleanup:
2572 	kfree(bundle);
2573 
2574 	return;
2575 }
2576 
2577 static int dm_resume(void *handle)
2578 {
2579 	struct amdgpu_device *adev = handle;
2580 	struct drm_device *ddev = adev_to_drm(adev);
2581 	struct amdgpu_display_manager *dm = &adev->dm;
2582 	struct amdgpu_dm_connector *aconnector;
2583 	struct drm_connector *connector;
2584 	struct drm_connector_list_iter iter;
2585 	struct drm_crtc *crtc;
2586 	struct drm_crtc_state *new_crtc_state;
2587 	struct dm_crtc_state *dm_new_crtc_state;
2588 	struct drm_plane *plane;
2589 	struct drm_plane_state *new_plane_state;
2590 	struct dm_plane_state *dm_new_plane_state;
2591 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2592 	enum dc_connection_type new_connection_type = dc_connection_none;
2593 	struct dc_state *dc_state;
2594 	int i, r, j;
2595 
2596 	if (amdgpu_in_reset(adev)) {
2597 		dc_state = dm->cached_dc_state;
2598 
2599 		/*
2600 		 * The dc->current_state is backed up into dm->cached_dc_state
2601 		 * before we commit 0 streams.
2602 		 *
2603 		 * DC will clear link encoder assignments on the real state
2604 		 * but the changes won't propagate over to the copy we made
2605 		 * before the 0 streams commit.
2606 		 *
2607 		 * DC expects that link encoder assignments are *not* valid
2608 		 * when committing a state, so as a workaround we can copy
2609 		 * off of the current state.
2610 		 *
2611 		 * We lose the previous assignments, but we had already
2612 		 * commit 0 streams anyway.
2613 		 */
2614 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2615 
2616 		r = dm_dmub_hw_init(adev);
2617 		if (r)
2618 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2619 
2620 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2621 		dc_resume(dm->dc);
2622 
2623 		amdgpu_dm_irq_resume_early(adev);
2624 
2625 		for (i = 0; i < dc_state->stream_count; i++) {
2626 			dc_state->streams[i]->mode_changed = true;
2627 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2628 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2629 					= 0xffffffff;
2630 			}
2631 		}
2632 
2633 		if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2634 			amdgpu_dm_outbox_init(adev);
2635 			dc_enable_dmub_outbox(adev->dm.dc);
2636 		}
2637 
2638 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2639 
2640 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2641 
2642 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2643 
2644 		dc_release_state(dm->cached_dc_state);
2645 		dm->cached_dc_state = NULL;
2646 
2647 		amdgpu_dm_irq_resume_late(adev);
2648 
2649 		mutex_unlock(&dm->dc_lock);
2650 
2651 		return 0;
2652 	}
2653 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2654 	dc_release_state(dm_state->context);
2655 	dm_state->context = dc_create_state(dm->dc);
2656 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2657 	dc_resource_state_construct(dm->dc, dm_state->context);
2658 
2659 	/* Before powering on DC we need to re-initialize DMUB. */
2660 	dm_dmub_hw_resume(adev);
2661 
2662 	/* Re-enable outbox interrupts for DPIA. */
2663 	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2664 		amdgpu_dm_outbox_init(adev);
2665 		dc_enable_dmub_outbox(adev->dm.dc);
2666 	}
2667 
2668 	/* power on hardware */
2669 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2670 
2671 	/* program HPD filter */
2672 	dc_resume(dm->dc);
2673 
2674 	/*
2675 	 * early enable HPD Rx IRQ, should be done before set mode as short
2676 	 * pulse interrupts are used for MST
2677 	 */
2678 	amdgpu_dm_irq_resume_early(adev);
2679 
2680 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2681 	s3_handle_mst(ddev, false);
2682 
2683 	/* Do detection*/
2684 	drm_connector_list_iter_begin(ddev, &iter);
2685 	drm_for_each_connector_iter(connector, &iter) {
2686 		aconnector = to_amdgpu_dm_connector(connector);
2687 
2688 		/*
2689 		 * this is the case when traversing through already created
2690 		 * MST connectors, should be skipped
2691 		 */
2692 		if (aconnector->dc_link &&
2693 		    aconnector->dc_link->type == dc_connection_mst_branch)
2694 			continue;
2695 
2696 		mutex_lock(&aconnector->hpd_lock);
2697 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2698 			DRM_ERROR("KMS: Failed to detect connector\n");
2699 
2700 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2701 			emulated_link_detect(aconnector->dc_link);
2702 		} else {
2703 			mutex_lock(&dm->dc_lock);
2704 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2705 			mutex_unlock(&dm->dc_lock);
2706 		}
2707 
2708 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2709 			aconnector->fake_enable = false;
2710 
2711 		if (aconnector->dc_sink)
2712 			dc_sink_release(aconnector->dc_sink);
2713 		aconnector->dc_sink = NULL;
2714 		amdgpu_dm_update_connector_after_detect(aconnector);
2715 		mutex_unlock(&aconnector->hpd_lock);
2716 	}
2717 	drm_connector_list_iter_end(&iter);
2718 
2719 	/* Force mode set in atomic commit */
2720 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2721 		new_crtc_state->active_changed = true;
2722 
2723 	/*
2724 	 * atomic_check is expected to create the dc states. We need to release
2725 	 * them here, since they were duplicated as part of the suspend
2726 	 * procedure.
2727 	 */
2728 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2729 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2730 		if (dm_new_crtc_state->stream) {
2731 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2732 			dc_stream_release(dm_new_crtc_state->stream);
2733 			dm_new_crtc_state->stream = NULL;
2734 		}
2735 	}
2736 
2737 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2738 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2739 		if (dm_new_plane_state->dc_state) {
2740 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2741 			dc_plane_state_release(dm_new_plane_state->dc_state);
2742 			dm_new_plane_state->dc_state = NULL;
2743 		}
2744 	}
2745 
2746 	drm_atomic_helper_resume(ddev, dm->cached_state);
2747 
2748 	dm->cached_state = NULL;
2749 
2750 	amdgpu_dm_irq_resume_late(adev);
2751 
2752 	amdgpu_dm_smu_write_watermarks_table(adev);
2753 
2754 	return 0;
2755 }
2756 
2757 /**
2758  * DOC: DM Lifecycle
2759  *
2760  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2761  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2762  * the base driver's device list to be initialized and torn down accordingly.
2763  *
2764  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2765  */
2766 
2767 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2768 	.name = "dm",
2769 	.early_init = dm_early_init,
2770 	.late_init = dm_late_init,
2771 	.sw_init = dm_sw_init,
2772 	.sw_fini = dm_sw_fini,
2773 	.early_fini = amdgpu_dm_early_fini,
2774 	.hw_init = dm_hw_init,
2775 	.hw_fini = dm_hw_fini,
2776 	.suspend = dm_suspend,
2777 	.resume = dm_resume,
2778 	.is_idle = dm_is_idle,
2779 	.wait_for_idle = dm_wait_for_idle,
2780 	.check_soft_reset = dm_check_soft_reset,
2781 	.soft_reset = dm_soft_reset,
2782 	.set_clockgating_state = dm_set_clockgating_state,
2783 	.set_powergating_state = dm_set_powergating_state,
2784 };
2785 
2786 const struct amdgpu_ip_block_version dm_ip_block =
2787 {
2788 	.type = AMD_IP_BLOCK_TYPE_DCE,
2789 	.major = 1,
2790 	.minor = 0,
2791 	.rev = 0,
2792 	.funcs = &amdgpu_dm_funcs,
2793 };
2794 
2795 
2796 /**
2797  * DOC: atomic
2798  *
2799  * *WIP*
2800  */
2801 
2802 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2803 	.fb_create = amdgpu_display_user_framebuffer_create,
2804 	.get_format_info = amd_get_format_info,
2805 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2806 	.atomic_check = amdgpu_dm_atomic_check,
2807 	.atomic_commit = drm_atomic_helper_commit,
2808 };
2809 
2810 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2811 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2812 };
2813 
2814 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2815 {
2816 	u32 max_avg, min_cll, max, min, q, r;
2817 	struct amdgpu_dm_backlight_caps *caps;
2818 	struct amdgpu_display_manager *dm;
2819 	struct drm_connector *conn_base;
2820 	struct amdgpu_device *adev;
2821 	struct dc_link *link = NULL;
2822 	static const u8 pre_computed_values[] = {
2823 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2824 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2825 	int i;
2826 
2827 	if (!aconnector || !aconnector->dc_link)
2828 		return;
2829 
2830 	link = aconnector->dc_link;
2831 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2832 		return;
2833 
2834 	conn_base = &aconnector->base;
2835 	adev = drm_to_adev(conn_base->dev);
2836 	dm = &adev->dm;
2837 	for (i = 0; i < dm->num_of_edps; i++) {
2838 		if (link == dm->backlight_link[i])
2839 			break;
2840 	}
2841 	if (i >= dm->num_of_edps)
2842 		return;
2843 	caps = &dm->backlight_caps[i];
2844 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2845 	caps->aux_support = false;
2846 	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2847 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2848 
2849 	if (caps->ext_caps->bits.oled == 1 /*||
2850 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2851 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2852 		caps->aux_support = true;
2853 
2854 	if (amdgpu_backlight == 0)
2855 		caps->aux_support = false;
2856 	else if (amdgpu_backlight == 1)
2857 		caps->aux_support = true;
2858 
2859 	/* From the specification (CTA-861-G), for calculating the maximum
2860 	 * luminance we need to use:
2861 	 *	Luminance = 50*2**(CV/32)
2862 	 * Where CV is a one-byte value.
2863 	 * For calculating this expression we may need float point precision;
2864 	 * to avoid this complexity level, we take advantage that CV is divided
2865 	 * by a constant. From the Euclids division algorithm, we know that CV
2866 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2867 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2868 	 * need to pre-compute the value of r/32. For pre-computing the values
2869 	 * We just used the following Ruby line:
2870 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2871 	 * The results of the above expressions can be verified at
2872 	 * pre_computed_values.
2873 	 */
2874 	q = max_avg >> 5;
2875 	r = max_avg % 32;
2876 	max = (1 << q) * pre_computed_values[r];
2877 
2878 	// min luminance: maxLum * (CV/255)^2 / 100
2879 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2880 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2881 
2882 	caps->aux_max_input_signal = max;
2883 	caps->aux_min_input_signal = min;
2884 }
2885 
2886 void amdgpu_dm_update_connector_after_detect(
2887 		struct amdgpu_dm_connector *aconnector)
2888 {
2889 	struct drm_connector *connector = &aconnector->base;
2890 	struct drm_device *dev = connector->dev;
2891 	struct dc_sink *sink;
2892 
2893 	/* MST handled by drm_mst framework */
2894 	if (aconnector->mst_mgr.mst_state == true)
2895 		return;
2896 
2897 	sink = aconnector->dc_link->local_sink;
2898 	if (sink)
2899 		dc_sink_retain(sink);
2900 
2901 	/*
2902 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2903 	 * the connector sink is set to either fake or physical sink depends on link status.
2904 	 * Skip if already done during boot.
2905 	 */
2906 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2907 			&& aconnector->dc_em_sink) {
2908 
2909 		/*
2910 		 * For S3 resume with headless use eml_sink to fake stream
2911 		 * because on resume connector->sink is set to NULL
2912 		 */
2913 		mutex_lock(&dev->mode_config.mutex);
2914 
2915 		if (sink) {
2916 			if (aconnector->dc_sink) {
2917 				amdgpu_dm_update_freesync_caps(connector, NULL);
2918 				/*
2919 				 * retain and release below are used to
2920 				 * bump up refcount for sink because the link doesn't point
2921 				 * to it anymore after disconnect, so on next crtc to connector
2922 				 * reshuffle by UMD we will get into unwanted dc_sink release
2923 				 */
2924 				dc_sink_release(aconnector->dc_sink);
2925 			}
2926 			aconnector->dc_sink = sink;
2927 			dc_sink_retain(aconnector->dc_sink);
2928 			amdgpu_dm_update_freesync_caps(connector,
2929 					aconnector->edid);
2930 		} else {
2931 			amdgpu_dm_update_freesync_caps(connector, NULL);
2932 			if (!aconnector->dc_sink) {
2933 				aconnector->dc_sink = aconnector->dc_em_sink;
2934 				dc_sink_retain(aconnector->dc_sink);
2935 			}
2936 		}
2937 
2938 		mutex_unlock(&dev->mode_config.mutex);
2939 
2940 		if (sink)
2941 			dc_sink_release(sink);
2942 		return;
2943 	}
2944 
2945 	/*
2946 	 * TODO: temporary guard to look for proper fix
2947 	 * if this sink is MST sink, we should not do anything
2948 	 */
2949 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2950 		dc_sink_release(sink);
2951 		return;
2952 	}
2953 
2954 	if (aconnector->dc_sink == sink) {
2955 		/*
2956 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2957 		 * Do nothing!!
2958 		 */
2959 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2960 				aconnector->connector_id);
2961 		if (sink)
2962 			dc_sink_release(sink);
2963 		return;
2964 	}
2965 
2966 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2967 		aconnector->connector_id, aconnector->dc_sink, sink);
2968 
2969 	mutex_lock(&dev->mode_config.mutex);
2970 
2971 	/*
2972 	 * 1. Update status of the drm connector
2973 	 * 2. Send an event and let userspace tell us what to do
2974 	 */
2975 	if (sink) {
2976 		/*
2977 		 * TODO: check if we still need the S3 mode update workaround.
2978 		 * If yes, put it here.
2979 		 */
2980 		if (aconnector->dc_sink) {
2981 			amdgpu_dm_update_freesync_caps(connector, NULL);
2982 			dc_sink_release(aconnector->dc_sink);
2983 		}
2984 
2985 		aconnector->dc_sink = sink;
2986 		dc_sink_retain(aconnector->dc_sink);
2987 		if (sink->dc_edid.length == 0) {
2988 			aconnector->edid = NULL;
2989 			if (aconnector->dc_link->aux_mode) {
2990 				drm_dp_cec_unset_edid(
2991 					&aconnector->dm_dp_aux.aux);
2992 			}
2993 		} else {
2994 			aconnector->edid =
2995 				(struct edid *)sink->dc_edid.raw_edid;
2996 
2997 			if (aconnector->dc_link->aux_mode)
2998 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2999 						    aconnector->edid);
3000 		}
3001 
3002 		drm_connector_update_edid_property(connector, aconnector->edid);
3003 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3004 		update_connector_ext_caps(aconnector);
3005 	} else {
3006 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3007 		amdgpu_dm_update_freesync_caps(connector, NULL);
3008 		drm_connector_update_edid_property(connector, NULL);
3009 		aconnector->num_modes = 0;
3010 		dc_sink_release(aconnector->dc_sink);
3011 		aconnector->dc_sink = NULL;
3012 		aconnector->edid = NULL;
3013 #ifdef CONFIG_DRM_AMD_DC_HDCP
3014 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3015 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3016 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3017 #endif
3018 	}
3019 
3020 	mutex_unlock(&dev->mode_config.mutex);
3021 
3022 	update_subconnector_property(aconnector);
3023 
3024 	if (sink)
3025 		dc_sink_release(sink);
3026 }
3027 
3028 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3029 {
3030 	struct drm_connector *connector = &aconnector->base;
3031 	struct drm_device *dev = connector->dev;
3032 	enum dc_connection_type new_connection_type = dc_connection_none;
3033 	struct amdgpu_device *adev = drm_to_adev(dev);
3034 #ifdef CONFIG_DRM_AMD_DC_HDCP
3035 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3036 #endif
3037 	bool ret = false;
3038 
3039 	if (adev->dm.disable_hpd_irq)
3040 		return;
3041 
3042 	/*
3043 	 * In case of failure or MST no need to update connector status or notify the OS
3044 	 * since (for MST case) MST does this in its own context.
3045 	 */
3046 	mutex_lock(&aconnector->hpd_lock);
3047 
3048 #ifdef CONFIG_DRM_AMD_DC_HDCP
3049 	if (adev->dm.hdcp_workqueue) {
3050 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3051 		dm_con_state->update_hdcp = true;
3052 	}
3053 #endif
3054 	if (aconnector->fake_enable)
3055 		aconnector->fake_enable = false;
3056 
3057 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3058 		DRM_ERROR("KMS: Failed to detect connector\n");
3059 
3060 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3061 		emulated_link_detect(aconnector->dc_link);
3062 
3063 		drm_modeset_lock_all(dev);
3064 		dm_restore_drm_connector_state(dev, connector);
3065 		drm_modeset_unlock_all(dev);
3066 
3067 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3068 			drm_kms_helper_connector_hotplug_event(connector);
3069 	} else {
3070 		mutex_lock(&adev->dm.dc_lock);
3071 		ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3072 		mutex_unlock(&adev->dm.dc_lock);
3073 		if (ret) {
3074 			amdgpu_dm_update_connector_after_detect(aconnector);
3075 
3076 			drm_modeset_lock_all(dev);
3077 			dm_restore_drm_connector_state(dev, connector);
3078 			drm_modeset_unlock_all(dev);
3079 
3080 			if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3081 				drm_kms_helper_connector_hotplug_event(connector);
3082 		}
3083 	}
3084 	mutex_unlock(&aconnector->hpd_lock);
3085 
3086 }
3087 
3088 static void handle_hpd_irq(void *param)
3089 {
3090 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3091 
3092 	handle_hpd_irq_helper(aconnector);
3093 
3094 }
3095 
3096 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3097 {
3098 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3099 	uint8_t dret;
3100 	bool new_irq_handled = false;
3101 	int dpcd_addr;
3102 	int dpcd_bytes_to_read;
3103 
3104 	const int max_process_count = 30;
3105 	int process_count = 0;
3106 
3107 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3108 
3109 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3110 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3111 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3112 		dpcd_addr = DP_SINK_COUNT;
3113 	} else {
3114 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3115 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3116 		dpcd_addr = DP_SINK_COUNT_ESI;
3117 	}
3118 
3119 	dret = drm_dp_dpcd_read(
3120 		&aconnector->dm_dp_aux.aux,
3121 		dpcd_addr,
3122 		esi,
3123 		dpcd_bytes_to_read);
3124 
3125 	while (dret == dpcd_bytes_to_read &&
3126 		process_count < max_process_count) {
3127 		uint8_t retry;
3128 		dret = 0;
3129 
3130 		process_count++;
3131 
3132 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3133 		/* handle HPD short pulse irq */
3134 		if (aconnector->mst_mgr.mst_state)
3135 			drm_dp_mst_hpd_irq(
3136 				&aconnector->mst_mgr,
3137 				esi,
3138 				&new_irq_handled);
3139 
3140 		if (new_irq_handled) {
3141 			/* ACK at DPCD to notify down stream */
3142 			const int ack_dpcd_bytes_to_write =
3143 				dpcd_bytes_to_read - 1;
3144 
3145 			for (retry = 0; retry < 3; retry++) {
3146 				uint8_t wret;
3147 
3148 				wret = drm_dp_dpcd_write(
3149 					&aconnector->dm_dp_aux.aux,
3150 					dpcd_addr + 1,
3151 					&esi[1],
3152 					ack_dpcd_bytes_to_write);
3153 				if (wret == ack_dpcd_bytes_to_write)
3154 					break;
3155 			}
3156 
3157 			/* check if there is new irq to be handled */
3158 			dret = drm_dp_dpcd_read(
3159 				&aconnector->dm_dp_aux.aux,
3160 				dpcd_addr,
3161 				esi,
3162 				dpcd_bytes_to_read);
3163 
3164 			new_irq_handled = false;
3165 		} else {
3166 			break;
3167 		}
3168 	}
3169 
3170 	if (process_count == max_process_count)
3171 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3172 }
3173 
3174 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3175 							union hpd_irq_data hpd_irq_data)
3176 {
3177 	struct hpd_rx_irq_offload_work *offload_work =
3178 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3179 
3180 	if (!offload_work) {
3181 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3182 		return;
3183 	}
3184 
3185 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3186 	offload_work->data = hpd_irq_data;
3187 	offload_work->offload_wq = offload_wq;
3188 
3189 	queue_work(offload_wq->wq, &offload_work->work);
3190 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3191 }
3192 
3193 static void handle_hpd_rx_irq(void *param)
3194 {
3195 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3196 	struct drm_connector *connector = &aconnector->base;
3197 	struct drm_device *dev = connector->dev;
3198 	struct dc_link *dc_link = aconnector->dc_link;
3199 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3200 	bool result = false;
3201 	enum dc_connection_type new_connection_type = dc_connection_none;
3202 	struct amdgpu_device *adev = drm_to_adev(dev);
3203 	union hpd_irq_data hpd_irq_data;
3204 	bool link_loss = false;
3205 	bool has_left_work = false;
3206 	int idx = aconnector->base.index;
3207 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3208 
3209 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3210 
3211 	if (adev->dm.disable_hpd_irq)
3212 		return;
3213 
3214 	/*
3215 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3216 	 * conflict, after implement i2c helper, this mutex should be
3217 	 * retired.
3218 	 */
3219 	mutex_lock(&aconnector->hpd_lock);
3220 
3221 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3222 						&link_loss, true, &has_left_work);
3223 
3224 	if (!has_left_work)
3225 		goto out;
3226 
3227 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3228 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3229 		goto out;
3230 	}
3231 
3232 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3233 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3234 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3235 			dm_handle_mst_sideband_msg(aconnector);
3236 			goto out;
3237 		}
3238 
3239 		if (link_loss) {
3240 			bool skip = false;
3241 
3242 			spin_lock(&offload_wq->offload_lock);
3243 			skip = offload_wq->is_handling_link_loss;
3244 
3245 			if (!skip)
3246 				offload_wq->is_handling_link_loss = true;
3247 
3248 			spin_unlock(&offload_wq->offload_lock);
3249 
3250 			if (!skip)
3251 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3252 
3253 			goto out;
3254 		}
3255 	}
3256 
3257 out:
3258 	if (result && !is_mst_root_connector) {
3259 		/* Downstream Port status changed. */
3260 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3261 			DRM_ERROR("KMS: Failed to detect connector\n");
3262 
3263 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3264 			emulated_link_detect(dc_link);
3265 
3266 			if (aconnector->fake_enable)
3267 				aconnector->fake_enable = false;
3268 
3269 			amdgpu_dm_update_connector_after_detect(aconnector);
3270 
3271 
3272 			drm_modeset_lock_all(dev);
3273 			dm_restore_drm_connector_state(dev, connector);
3274 			drm_modeset_unlock_all(dev);
3275 
3276 			drm_kms_helper_connector_hotplug_event(connector);
3277 		} else {
3278 			bool ret = false;
3279 
3280 			mutex_lock(&adev->dm.dc_lock);
3281 			ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3282 			mutex_unlock(&adev->dm.dc_lock);
3283 
3284 			if (ret) {
3285 				if (aconnector->fake_enable)
3286 					aconnector->fake_enable = false;
3287 
3288 				amdgpu_dm_update_connector_after_detect(aconnector);
3289 
3290 				drm_modeset_lock_all(dev);
3291 				dm_restore_drm_connector_state(dev, connector);
3292 				drm_modeset_unlock_all(dev);
3293 
3294 				drm_kms_helper_connector_hotplug_event(connector);
3295 			}
3296 		}
3297 	}
3298 #ifdef CONFIG_DRM_AMD_DC_HDCP
3299 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3300 		if (adev->dm.hdcp_workqueue)
3301 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3302 	}
3303 #endif
3304 
3305 	if (dc_link->type != dc_connection_mst_branch)
3306 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3307 
3308 	mutex_unlock(&aconnector->hpd_lock);
3309 }
3310 
3311 static void register_hpd_handlers(struct amdgpu_device *adev)
3312 {
3313 	struct drm_device *dev = adev_to_drm(adev);
3314 	struct drm_connector *connector;
3315 	struct amdgpu_dm_connector *aconnector;
3316 	const struct dc_link *dc_link;
3317 	struct dc_interrupt_params int_params = {0};
3318 
3319 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3320 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3321 
3322 	list_for_each_entry(connector,
3323 			&dev->mode_config.connector_list, head)	{
3324 
3325 		aconnector = to_amdgpu_dm_connector(connector);
3326 		dc_link = aconnector->dc_link;
3327 
3328 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3329 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3330 			int_params.irq_source = dc_link->irq_source_hpd;
3331 
3332 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3333 					handle_hpd_irq,
3334 					(void *) aconnector);
3335 		}
3336 
3337 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3338 
3339 			/* Also register for DP short pulse (hpd_rx). */
3340 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3341 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
3342 
3343 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3344 					handle_hpd_rx_irq,
3345 					(void *) aconnector);
3346 
3347 			if (adev->dm.hpd_rx_offload_wq)
3348 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3349 					aconnector;
3350 		}
3351 	}
3352 }
3353 
3354 #if defined(CONFIG_DRM_AMD_DC_SI)
3355 /* Register IRQ sources and initialize IRQ callbacks */
3356 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3357 {
3358 	struct dc *dc = adev->dm.dc;
3359 	struct common_irq_params *c_irq_params;
3360 	struct dc_interrupt_params int_params = {0};
3361 	int r;
3362 	int i;
3363 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3364 
3365 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3366 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3367 
3368 	/*
3369 	 * Actions of amdgpu_irq_add_id():
3370 	 * 1. Register a set() function with base driver.
3371 	 *    Base driver will call set() function to enable/disable an
3372 	 *    interrupt in DC hardware.
3373 	 * 2. Register amdgpu_dm_irq_handler().
3374 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3375 	 *    coming from DC hardware.
3376 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3377 	 *    for acknowledging and handling. */
3378 
3379 	/* Use VBLANK interrupt */
3380 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3381 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
3382 		if (r) {
3383 			DRM_ERROR("Failed to add crtc irq id!\n");
3384 			return r;
3385 		}
3386 
3387 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3388 		int_params.irq_source =
3389 			dc_interrupt_to_irq_source(dc, i+1 , 0);
3390 
3391 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3392 
3393 		c_irq_params->adev = adev;
3394 		c_irq_params->irq_src = int_params.irq_source;
3395 
3396 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3397 				dm_crtc_high_irq, c_irq_params);
3398 	}
3399 
3400 	/* Use GRPH_PFLIP interrupt */
3401 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3402 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3403 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3404 		if (r) {
3405 			DRM_ERROR("Failed to add page flip irq id!\n");
3406 			return r;
3407 		}
3408 
3409 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3410 		int_params.irq_source =
3411 			dc_interrupt_to_irq_source(dc, i, 0);
3412 
3413 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3414 
3415 		c_irq_params->adev = adev;
3416 		c_irq_params->irq_src = int_params.irq_source;
3417 
3418 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3419 				dm_pflip_high_irq, c_irq_params);
3420 
3421 	}
3422 
3423 	/* HPD */
3424 	r = amdgpu_irq_add_id(adev, client_id,
3425 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3426 	if (r) {
3427 		DRM_ERROR("Failed to add hpd irq id!\n");
3428 		return r;
3429 	}
3430 
3431 	register_hpd_handlers(adev);
3432 
3433 	return 0;
3434 }
3435 #endif
3436 
3437 /* Register IRQ sources and initialize IRQ callbacks */
3438 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3439 {
3440 	struct dc *dc = adev->dm.dc;
3441 	struct common_irq_params *c_irq_params;
3442 	struct dc_interrupt_params int_params = {0};
3443 	int r;
3444 	int i;
3445 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3446 
3447 	if (adev->family >= AMDGPU_FAMILY_AI)
3448 		client_id = SOC15_IH_CLIENTID_DCE;
3449 
3450 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3451 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3452 
3453 	/*
3454 	 * Actions of amdgpu_irq_add_id():
3455 	 * 1. Register a set() function with base driver.
3456 	 *    Base driver will call set() function to enable/disable an
3457 	 *    interrupt in DC hardware.
3458 	 * 2. Register amdgpu_dm_irq_handler().
3459 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3460 	 *    coming from DC hardware.
3461 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3462 	 *    for acknowledging and handling. */
3463 
3464 	/* Use VBLANK interrupt */
3465 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3466 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3467 		if (r) {
3468 			DRM_ERROR("Failed to add crtc irq id!\n");
3469 			return r;
3470 		}
3471 
3472 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3473 		int_params.irq_source =
3474 			dc_interrupt_to_irq_source(dc, i, 0);
3475 
3476 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3477 
3478 		c_irq_params->adev = adev;
3479 		c_irq_params->irq_src = int_params.irq_source;
3480 
3481 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3482 				dm_crtc_high_irq, c_irq_params);
3483 	}
3484 
3485 	/* Use VUPDATE interrupt */
3486 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3487 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3488 		if (r) {
3489 			DRM_ERROR("Failed to add vupdate irq id!\n");
3490 			return r;
3491 		}
3492 
3493 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3494 		int_params.irq_source =
3495 			dc_interrupt_to_irq_source(dc, i, 0);
3496 
3497 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3498 
3499 		c_irq_params->adev = adev;
3500 		c_irq_params->irq_src = int_params.irq_source;
3501 
3502 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3503 				dm_vupdate_high_irq, c_irq_params);
3504 	}
3505 
3506 	/* Use GRPH_PFLIP interrupt */
3507 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3508 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3509 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3510 		if (r) {
3511 			DRM_ERROR("Failed to add page flip irq id!\n");
3512 			return r;
3513 		}
3514 
3515 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3516 		int_params.irq_source =
3517 			dc_interrupt_to_irq_source(dc, i, 0);
3518 
3519 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3520 
3521 		c_irq_params->adev = adev;
3522 		c_irq_params->irq_src = int_params.irq_source;
3523 
3524 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3525 				dm_pflip_high_irq, c_irq_params);
3526 
3527 	}
3528 
3529 	/* HPD */
3530 	r = amdgpu_irq_add_id(adev, client_id,
3531 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3532 	if (r) {
3533 		DRM_ERROR("Failed to add hpd irq id!\n");
3534 		return r;
3535 	}
3536 
3537 	register_hpd_handlers(adev);
3538 
3539 	return 0;
3540 }
3541 
3542 /* Register IRQ sources and initialize IRQ callbacks */
3543 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3544 {
3545 	struct dc *dc = adev->dm.dc;
3546 	struct common_irq_params *c_irq_params;
3547 	struct dc_interrupt_params int_params = {0};
3548 	int r;
3549 	int i;
3550 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3551 	static const unsigned int vrtl_int_srcid[] = {
3552 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3553 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3554 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3555 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3556 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3557 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3558 	};
3559 #endif
3560 
3561 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3562 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3563 
3564 	/*
3565 	 * Actions of amdgpu_irq_add_id():
3566 	 * 1. Register a set() function with base driver.
3567 	 *    Base driver will call set() function to enable/disable an
3568 	 *    interrupt in DC hardware.
3569 	 * 2. Register amdgpu_dm_irq_handler().
3570 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3571 	 *    coming from DC hardware.
3572 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3573 	 *    for acknowledging and handling.
3574 	 */
3575 
3576 	/* Use VSTARTUP interrupt */
3577 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3578 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3579 			i++) {
3580 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3581 
3582 		if (r) {
3583 			DRM_ERROR("Failed to add crtc irq id!\n");
3584 			return r;
3585 		}
3586 
3587 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3588 		int_params.irq_source =
3589 			dc_interrupt_to_irq_source(dc, i, 0);
3590 
3591 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3592 
3593 		c_irq_params->adev = adev;
3594 		c_irq_params->irq_src = int_params.irq_source;
3595 
3596 		amdgpu_dm_irq_register_interrupt(
3597 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3598 	}
3599 
3600 	/* Use otg vertical line interrupt */
3601 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3602 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3603 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3604 				vrtl_int_srcid[i], &adev->vline0_irq);
3605 
3606 		if (r) {
3607 			DRM_ERROR("Failed to add vline0 irq id!\n");
3608 			return r;
3609 		}
3610 
3611 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3612 		int_params.irq_source =
3613 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3614 
3615 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3616 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3617 			break;
3618 		}
3619 
3620 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3621 					- DC_IRQ_SOURCE_DC1_VLINE0];
3622 
3623 		c_irq_params->adev = adev;
3624 		c_irq_params->irq_src = int_params.irq_source;
3625 
3626 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3627 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3628 	}
3629 #endif
3630 
3631 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3632 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3633 	 * to trigger at end of each vblank, regardless of state of the lock,
3634 	 * matching DCE behaviour.
3635 	 */
3636 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3637 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3638 	     i++) {
3639 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3640 
3641 		if (r) {
3642 			DRM_ERROR("Failed to add vupdate irq id!\n");
3643 			return r;
3644 		}
3645 
3646 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3647 		int_params.irq_source =
3648 			dc_interrupt_to_irq_source(dc, i, 0);
3649 
3650 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3651 
3652 		c_irq_params->adev = adev;
3653 		c_irq_params->irq_src = int_params.irq_source;
3654 
3655 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3656 				dm_vupdate_high_irq, c_irq_params);
3657 	}
3658 
3659 	/* Use GRPH_PFLIP interrupt */
3660 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3661 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3662 			i++) {
3663 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3664 		if (r) {
3665 			DRM_ERROR("Failed to add page flip irq id!\n");
3666 			return r;
3667 		}
3668 
3669 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3670 		int_params.irq_source =
3671 			dc_interrupt_to_irq_source(dc, i, 0);
3672 
3673 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3674 
3675 		c_irq_params->adev = adev;
3676 		c_irq_params->irq_src = int_params.irq_source;
3677 
3678 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3679 				dm_pflip_high_irq, c_irq_params);
3680 
3681 	}
3682 
3683 	/* HPD */
3684 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3685 			&adev->hpd_irq);
3686 	if (r) {
3687 		DRM_ERROR("Failed to add hpd irq id!\n");
3688 		return r;
3689 	}
3690 
3691 	register_hpd_handlers(adev);
3692 
3693 	return 0;
3694 }
3695 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3696 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3697 {
3698 	struct dc *dc = adev->dm.dc;
3699 	struct common_irq_params *c_irq_params;
3700 	struct dc_interrupt_params int_params = {0};
3701 	int r, i;
3702 
3703 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3704 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3705 
3706 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3707 			&adev->dmub_outbox_irq);
3708 	if (r) {
3709 		DRM_ERROR("Failed to add outbox irq id!\n");
3710 		return r;
3711 	}
3712 
3713 	if (dc->ctx->dmub_srv) {
3714 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3715 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3716 		int_params.irq_source =
3717 		dc_interrupt_to_irq_source(dc, i, 0);
3718 
3719 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3720 
3721 		c_irq_params->adev = adev;
3722 		c_irq_params->irq_src = int_params.irq_source;
3723 
3724 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3725 				dm_dmub_outbox1_low_irq, c_irq_params);
3726 	}
3727 
3728 	return 0;
3729 }
3730 
3731 /*
3732  * Acquires the lock for the atomic state object and returns
3733  * the new atomic state.
3734  *
3735  * This should only be called during atomic check.
3736  */
3737 int dm_atomic_get_state(struct drm_atomic_state *state,
3738 			struct dm_atomic_state **dm_state)
3739 {
3740 	struct drm_device *dev = state->dev;
3741 	struct amdgpu_device *adev = drm_to_adev(dev);
3742 	struct amdgpu_display_manager *dm = &adev->dm;
3743 	struct drm_private_state *priv_state;
3744 
3745 	if (*dm_state)
3746 		return 0;
3747 
3748 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3749 	if (IS_ERR(priv_state))
3750 		return PTR_ERR(priv_state);
3751 
3752 	*dm_state = to_dm_atomic_state(priv_state);
3753 
3754 	return 0;
3755 }
3756 
3757 static struct dm_atomic_state *
3758 dm_atomic_get_new_state(struct drm_atomic_state *state)
3759 {
3760 	struct drm_device *dev = state->dev;
3761 	struct amdgpu_device *adev = drm_to_adev(dev);
3762 	struct amdgpu_display_manager *dm = &adev->dm;
3763 	struct drm_private_obj *obj;
3764 	struct drm_private_state *new_obj_state;
3765 	int i;
3766 
3767 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3768 		if (obj->funcs == dm->atomic_obj.funcs)
3769 			return to_dm_atomic_state(new_obj_state);
3770 	}
3771 
3772 	return NULL;
3773 }
3774 
3775 static struct drm_private_state *
3776 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3777 {
3778 	struct dm_atomic_state *old_state, *new_state;
3779 
3780 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3781 	if (!new_state)
3782 		return NULL;
3783 
3784 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3785 
3786 	old_state = to_dm_atomic_state(obj->state);
3787 
3788 	if (old_state && old_state->context)
3789 		new_state->context = dc_copy_state(old_state->context);
3790 
3791 	if (!new_state->context) {
3792 		kfree(new_state);
3793 		return NULL;
3794 	}
3795 
3796 	return &new_state->base;
3797 }
3798 
3799 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3800 				    struct drm_private_state *state)
3801 {
3802 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3803 
3804 	if (dm_state && dm_state->context)
3805 		dc_release_state(dm_state->context);
3806 
3807 	kfree(dm_state);
3808 }
3809 
3810 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3811 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3812 	.atomic_destroy_state = dm_atomic_destroy_state,
3813 };
3814 
3815 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3816 {
3817 	struct dm_atomic_state *state;
3818 	int r;
3819 
3820 	adev->mode_info.mode_config_initialized = true;
3821 
3822 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3823 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3824 
3825 	adev_to_drm(adev)->mode_config.max_width = 16384;
3826 	adev_to_drm(adev)->mode_config.max_height = 16384;
3827 
3828 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3829 	/* disable prefer shadow for now due to hibernation issues */
3830 	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3831 	/* indicates support for immediate flip */
3832 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3833 
3834 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3835 
3836 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3837 	if (!state)
3838 		return -ENOMEM;
3839 
3840 	state->context = dc_create_state(adev->dm.dc);
3841 	if (!state->context) {
3842 		kfree(state);
3843 		return -ENOMEM;
3844 	}
3845 
3846 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3847 
3848 	drm_atomic_private_obj_init(adev_to_drm(adev),
3849 				    &adev->dm.atomic_obj,
3850 				    &state->base,
3851 				    &dm_atomic_state_funcs);
3852 
3853 	r = amdgpu_display_modeset_create_props(adev);
3854 	if (r) {
3855 		dc_release_state(state->context);
3856 		kfree(state);
3857 		return r;
3858 	}
3859 
3860 	r = amdgpu_dm_audio_init(adev);
3861 	if (r) {
3862 		dc_release_state(state->context);
3863 		kfree(state);
3864 		return r;
3865 	}
3866 
3867 	return 0;
3868 }
3869 
3870 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3871 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3872 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3873 
3874 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3875 					    int bl_idx)
3876 {
3877 #if defined(CONFIG_ACPI)
3878 	struct amdgpu_dm_backlight_caps caps;
3879 
3880 	memset(&caps, 0, sizeof(caps));
3881 
3882 	if (dm->backlight_caps[bl_idx].caps_valid)
3883 		return;
3884 
3885 	amdgpu_acpi_get_backlight_caps(&caps);
3886 	if (caps.caps_valid) {
3887 		dm->backlight_caps[bl_idx].caps_valid = true;
3888 		if (caps.aux_support)
3889 			return;
3890 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3891 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3892 	} else {
3893 		dm->backlight_caps[bl_idx].min_input_signal =
3894 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3895 		dm->backlight_caps[bl_idx].max_input_signal =
3896 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3897 	}
3898 #else
3899 	if (dm->backlight_caps[bl_idx].aux_support)
3900 		return;
3901 
3902 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3903 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3904 #endif
3905 }
3906 
3907 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3908 				unsigned *min, unsigned *max)
3909 {
3910 	if (!caps)
3911 		return 0;
3912 
3913 	if (caps->aux_support) {
3914 		// Firmware limits are in nits, DC API wants millinits.
3915 		*max = 1000 * caps->aux_max_input_signal;
3916 		*min = 1000 * caps->aux_min_input_signal;
3917 	} else {
3918 		// Firmware limits are 8-bit, PWM control is 16-bit.
3919 		*max = 0x101 * caps->max_input_signal;
3920 		*min = 0x101 * caps->min_input_signal;
3921 	}
3922 	return 1;
3923 }
3924 
3925 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3926 					uint32_t brightness)
3927 {
3928 	unsigned min, max;
3929 
3930 	if (!get_brightness_range(caps, &min, &max))
3931 		return brightness;
3932 
3933 	// Rescale 0..255 to min..max
3934 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3935 				       AMDGPU_MAX_BL_LEVEL);
3936 }
3937 
3938 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3939 				      uint32_t brightness)
3940 {
3941 	unsigned min, max;
3942 
3943 	if (!get_brightness_range(caps, &min, &max))
3944 		return brightness;
3945 
3946 	if (brightness < min)
3947 		return 0;
3948 	// Rescale min..max to 0..255
3949 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3950 				 max - min);
3951 }
3952 
3953 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3954 					 int bl_idx,
3955 					 u32 user_brightness)
3956 {
3957 	struct amdgpu_dm_backlight_caps caps;
3958 	struct dc_link *link;
3959 	u32 brightness;
3960 	bool rc;
3961 
3962 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3963 	caps = dm->backlight_caps[bl_idx];
3964 
3965 	dm->brightness[bl_idx] = user_brightness;
3966 	/* update scratch register */
3967 	if (bl_idx == 0)
3968 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3969 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3970 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3971 
3972 	/* Change brightness based on AUX property */
3973 	if (caps.aux_support) {
3974 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3975 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3976 		if (!rc)
3977 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3978 	} else {
3979 		rc = dc_link_set_backlight_level(link, brightness, 0);
3980 		if (!rc)
3981 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3982 	}
3983 
3984 	if (rc)
3985 		dm->actual_brightness[bl_idx] = user_brightness;
3986 }
3987 
3988 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3989 {
3990 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3991 	int i;
3992 
3993 	for (i = 0; i < dm->num_of_edps; i++) {
3994 		if (bd == dm->backlight_dev[i])
3995 			break;
3996 	}
3997 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3998 		i = 0;
3999 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4000 
4001 	return 0;
4002 }
4003 
4004 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4005 					 int bl_idx)
4006 {
4007 	struct amdgpu_dm_backlight_caps caps;
4008 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4009 
4010 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4011 	caps = dm->backlight_caps[bl_idx];
4012 
4013 	if (caps.aux_support) {
4014 		u32 avg, peak;
4015 		bool rc;
4016 
4017 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4018 		if (!rc)
4019 			return dm->brightness[bl_idx];
4020 		return convert_brightness_to_user(&caps, avg);
4021 	} else {
4022 		int ret = dc_link_get_backlight_level(link);
4023 
4024 		if (ret == DC_ERROR_UNEXPECTED)
4025 			return dm->brightness[bl_idx];
4026 		return convert_brightness_to_user(&caps, ret);
4027 	}
4028 }
4029 
4030 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4031 {
4032 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4033 	int i;
4034 
4035 	for (i = 0; i < dm->num_of_edps; i++) {
4036 		if (bd == dm->backlight_dev[i])
4037 			break;
4038 	}
4039 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4040 		i = 0;
4041 	return amdgpu_dm_backlight_get_level(dm, i);
4042 }
4043 
4044 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4045 	.options = BL_CORE_SUSPENDRESUME,
4046 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4047 	.update_status	= amdgpu_dm_backlight_update_status,
4048 };
4049 
4050 static void
4051 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4052 {
4053 	char bl_name[16];
4054 	struct backlight_properties props = { 0 };
4055 
4056 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4057 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4058 
4059 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4060 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4061 	props.type = BACKLIGHT_RAW;
4062 
4063 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4064 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4065 
4066 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4067 								       adev_to_drm(dm->adev)->dev,
4068 								       dm,
4069 								       &amdgpu_dm_backlight_ops,
4070 								       &props);
4071 
4072 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4073 		DRM_ERROR("DM: Backlight registration failed!\n");
4074 	else
4075 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4076 }
4077 
4078 static int initialize_plane(struct amdgpu_display_manager *dm,
4079 			    struct amdgpu_mode_info *mode_info, int plane_id,
4080 			    enum drm_plane_type plane_type,
4081 			    const struct dc_plane_cap *plane_cap)
4082 {
4083 	struct drm_plane *plane;
4084 	unsigned long possible_crtcs;
4085 	int ret = 0;
4086 
4087 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4088 	if (!plane) {
4089 		DRM_ERROR("KMS: Failed to allocate plane\n");
4090 		return -ENOMEM;
4091 	}
4092 	plane->type = plane_type;
4093 
4094 	/*
4095 	 * HACK: IGT tests expect that the primary plane for a CRTC
4096 	 * can only have one possible CRTC. Only expose support for
4097 	 * any CRTC if they're not going to be used as a primary plane
4098 	 * for a CRTC - like overlay or underlay planes.
4099 	 */
4100 	possible_crtcs = 1 << plane_id;
4101 	if (plane_id >= dm->dc->caps.max_streams)
4102 		possible_crtcs = 0xff;
4103 
4104 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4105 
4106 	if (ret) {
4107 		DRM_ERROR("KMS: Failed to initialize plane\n");
4108 		kfree(plane);
4109 		return ret;
4110 	}
4111 
4112 	if (mode_info)
4113 		mode_info->planes[plane_id] = plane;
4114 
4115 	return ret;
4116 }
4117 
4118 
4119 static void register_backlight_device(struct amdgpu_display_manager *dm,
4120 				      struct dc_link *link)
4121 {
4122 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4123 	    link->type != dc_connection_none) {
4124 		/*
4125 		 * Event if registration failed, we should continue with
4126 		 * DM initialization because not having a backlight control
4127 		 * is better then a black screen.
4128 		 */
4129 		if (!dm->backlight_dev[dm->num_of_edps])
4130 			amdgpu_dm_register_backlight_device(dm);
4131 
4132 		if (dm->backlight_dev[dm->num_of_edps]) {
4133 			dm->backlight_link[dm->num_of_edps] = link;
4134 			dm->num_of_edps++;
4135 		}
4136 	}
4137 }
4138 
4139 
4140 /*
4141  * In this architecture, the association
4142  * connector -> encoder -> crtc
4143  * id not really requried. The crtc and connector will hold the
4144  * display_index as an abstraction to use with DAL component
4145  *
4146  * Returns 0 on success
4147  */
4148 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4149 {
4150 	struct amdgpu_display_manager *dm = &adev->dm;
4151 	int32_t i;
4152 	struct amdgpu_dm_connector *aconnector = NULL;
4153 	struct amdgpu_encoder *aencoder = NULL;
4154 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4155 	uint32_t link_cnt;
4156 	int32_t primary_planes;
4157 	enum dc_connection_type new_connection_type = dc_connection_none;
4158 	const struct dc_plane_cap *plane;
4159 	bool psr_feature_enabled = false;
4160 
4161 	dm->display_indexes_num = dm->dc->caps.max_streams;
4162 	/* Update the actual used number of crtc */
4163 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4164 
4165 	link_cnt = dm->dc->caps.max_links;
4166 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4167 		DRM_ERROR("DM: Failed to initialize mode config\n");
4168 		return -EINVAL;
4169 	}
4170 
4171 	/* There is one primary plane per CRTC */
4172 	primary_planes = dm->dc->caps.max_streams;
4173 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4174 
4175 	/*
4176 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4177 	 * Order is reversed to match iteration order in atomic check.
4178 	 */
4179 	for (i = (primary_planes - 1); i >= 0; i--) {
4180 		plane = &dm->dc->caps.planes[i];
4181 
4182 		if (initialize_plane(dm, mode_info, i,
4183 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4184 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4185 			goto fail;
4186 		}
4187 	}
4188 
4189 	/*
4190 	 * Initialize overlay planes, index starting after primary planes.
4191 	 * These planes have a higher DRM index than the primary planes since
4192 	 * they should be considered as having a higher z-order.
4193 	 * Order is reversed to match iteration order in atomic check.
4194 	 *
4195 	 * Only support DCN for now, and only expose one so we don't encourage
4196 	 * userspace to use up all the pipes.
4197 	 */
4198 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4199 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4200 
4201 		/* Do not create overlay if MPO disabled */
4202 		if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
4203 			break;
4204 
4205 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4206 			continue;
4207 
4208 		if (!plane->blends_with_above || !plane->blends_with_below)
4209 			continue;
4210 
4211 		if (!plane->pixel_format_support.argb8888)
4212 			continue;
4213 
4214 		if (initialize_plane(dm, NULL, primary_planes + i,
4215 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4216 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4217 			goto fail;
4218 		}
4219 
4220 		/* Only create one overlay plane. */
4221 		break;
4222 	}
4223 
4224 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4225 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4226 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4227 			goto fail;
4228 		}
4229 
4230 	/* Use Outbox interrupt */
4231 	switch (adev->ip_versions[DCE_HWIP][0]) {
4232 	case IP_VERSION(3, 0, 0):
4233 	case IP_VERSION(3, 1, 2):
4234 	case IP_VERSION(3, 1, 3):
4235 	case IP_VERSION(3, 1, 4):
4236 	case IP_VERSION(3, 1, 5):
4237 	case IP_VERSION(3, 1, 6):
4238 	case IP_VERSION(3, 2, 0):
4239 	case IP_VERSION(3, 2, 1):
4240 	case IP_VERSION(2, 1, 0):
4241 		if (register_outbox_irq_handlers(dm->adev)) {
4242 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4243 			goto fail;
4244 		}
4245 		break;
4246 	default:
4247 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4248 			      adev->ip_versions[DCE_HWIP][0]);
4249 	}
4250 
4251 	/* Determine whether to enable PSR support by default. */
4252 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4253 		switch (adev->ip_versions[DCE_HWIP][0]) {
4254 		case IP_VERSION(3, 1, 2):
4255 		case IP_VERSION(3, 1, 3):
4256 		case IP_VERSION(3, 1, 4):
4257 		case IP_VERSION(3, 1, 5):
4258 		case IP_VERSION(3, 1, 6):
4259 		case IP_VERSION(3, 2, 0):
4260 		case IP_VERSION(3, 2, 1):
4261 			psr_feature_enabled = true;
4262 			break;
4263 		default:
4264 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4265 			break;
4266 		}
4267 	}
4268 
4269 	/* loops over all connectors on the board */
4270 	for (i = 0; i < link_cnt; i++) {
4271 		struct dc_link *link = NULL;
4272 
4273 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4274 			DRM_ERROR(
4275 				"KMS: Cannot support more than %d display indexes\n",
4276 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4277 			continue;
4278 		}
4279 
4280 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4281 		if (!aconnector)
4282 			goto fail;
4283 
4284 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4285 		if (!aencoder)
4286 			goto fail;
4287 
4288 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4289 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4290 			goto fail;
4291 		}
4292 
4293 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4294 			DRM_ERROR("KMS: Failed to initialize connector\n");
4295 			goto fail;
4296 		}
4297 
4298 		link = dc_get_link_at_index(dm->dc, i);
4299 
4300 		if (!dc_link_detect_sink(link, &new_connection_type))
4301 			DRM_ERROR("KMS: Failed to detect connector\n");
4302 
4303 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4304 			emulated_link_detect(link);
4305 			amdgpu_dm_update_connector_after_detect(aconnector);
4306 		} else {
4307 			bool ret = false;
4308 
4309 			mutex_lock(&dm->dc_lock);
4310 			ret = dc_link_detect(link, DETECT_REASON_BOOT);
4311 			mutex_unlock(&dm->dc_lock);
4312 
4313 			if (ret) {
4314 				amdgpu_dm_update_connector_after_detect(aconnector);
4315 				register_backlight_device(dm, link);
4316 
4317 				if (dm->num_of_edps)
4318 					update_connector_ext_caps(aconnector);
4319 
4320 				if (psr_feature_enabled)
4321 					amdgpu_dm_set_psr_caps(link);
4322 
4323 				/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4324 				 * PSR is also supported.
4325 				 */
4326 				if (link->psr_settings.psr_feature_enabled)
4327 					adev_to_drm(adev)->vblank_disable_immediate = false;
4328 			}
4329 		}
4330 	}
4331 
4332 	/* Software is initialized. Now we can register interrupt handlers. */
4333 	switch (adev->asic_type) {
4334 #if defined(CONFIG_DRM_AMD_DC_SI)
4335 	case CHIP_TAHITI:
4336 	case CHIP_PITCAIRN:
4337 	case CHIP_VERDE:
4338 	case CHIP_OLAND:
4339 		if (dce60_register_irq_handlers(dm->adev)) {
4340 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4341 			goto fail;
4342 		}
4343 		break;
4344 #endif
4345 	case CHIP_BONAIRE:
4346 	case CHIP_HAWAII:
4347 	case CHIP_KAVERI:
4348 	case CHIP_KABINI:
4349 	case CHIP_MULLINS:
4350 	case CHIP_TONGA:
4351 	case CHIP_FIJI:
4352 	case CHIP_CARRIZO:
4353 	case CHIP_STONEY:
4354 	case CHIP_POLARIS11:
4355 	case CHIP_POLARIS10:
4356 	case CHIP_POLARIS12:
4357 	case CHIP_VEGAM:
4358 	case CHIP_VEGA10:
4359 	case CHIP_VEGA12:
4360 	case CHIP_VEGA20:
4361 		if (dce110_register_irq_handlers(dm->adev)) {
4362 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4363 			goto fail;
4364 		}
4365 		break;
4366 	default:
4367 		switch (adev->ip_versions[DCE_HWIP][0]) {
4368 		case IP_VERSION(1, 0, 0):
4369 		case IP_VERSION(1, 0, 1):
4370 		case IP_VERSION(2, 0, 2):
4371 		case IP_VERSION(2, 0, 3):
4372 		case IP_VERSION(2, 0, 0):
4373 		case IP_VERSION(2, 1, 0):
4374 		case IP_VERSION(3, 0, 0):
4375 		case IP_VERSION(3, 0, 2):
4376 		case IP_VERSION(3, 0, 3):
4377 		case IP_VERSION(3, 0, 1):
4378 		case IP_VERSION(3, 1, 2):
4379 		case IP_VERSION(3, 1, 3):
4380 		case IP_VERSION(3, 1, 4):
4381 		case IP_VERSION(3, 1, 5):
4382 		case IP_VERSION(3, 1, 6):
4383 		case IP_VERSION(3, 2, 0):
4384 		case IP_VERSION(3, 2, 1):
4385 			if (dcn10_register_irq_handlers(dm->adev)) {
4386 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4387 				goto fail;
4388 			}
4389 			break;
4390 		default:
4391 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4392 					adev->ip_versions[DCE_HWIP][0]);
4393 			goto fail;
4394 		}
4395 		break;
4396 	}
4397 
4398 	return 0;
4399 fail:
4400 	kfree(aencoder);
4401 	kfree(aconnector);
4402 
4403 	return -EINVAL;
4404 }
4405 
4406 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4407 {
4408 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4409 	return;
4410 }
4411 
4412 /******************************************************************************
4413  * amdgpu_display_funcs functions
4414  *****************************************************************************/
4415 
4416 /*
4417  * dm_bandwidth_update - program display watermarks
4418  *
4419  * @adev: amdgpu_device pointer
4420  *
4421  * Calculate and program the display watermarks and line buffer allocation.
4422  */
4423 static void dm_bandwidth_update(struct amdgpu_device *adev)
4424 {
4425 	/* TODO: implement later */
4426 }
4427 
4428 static const struct amdgpu_display_funcs dm_display_funcs = {
4429 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4430 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4431 	.backlight_set_level = NULL, /* never called for DC */
4432 	.backlight_get_level = NULL, /* never called for DC */
4433 	.hpd_sense = NULL,/* called unconditionally */
4434 	.hpd_set_polarity = NULL, /* called unconditionally */
4435 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4436 	.page_flip_get_scanoutpos =
4437 		dm_crtc_get_scanoutpos,/* called unconditionally */
4438 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4439 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4440 };
4441 
4442 #if defined(CONFIG_DEBUG_KERNEL_DC)
4443 
4444 static ssize_t s3_debug_store(struct device *device,
4445 			      struct device_attribute *attr,
4446 			      const char *buf,
4447 			      size_t count)
4448 {
4449 	int ret;
4450 	int s3_state;
4451 	struct drm_device *drm_dev = dev_get_drvdata(device);
4452 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4453 
4454 	ret = kstrtoint(buf, 0, &s3_state);
4455 
4456 	if (ret == 0) {
4457 		if (s3_state) {
4458 			dm_resume(adev);
4459 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4460 		} else
4461 			dm_suspend(adev);
4462 	}
4463 
4464 	return ret == 0 ? count : 0;
4465 }
4466 
4467 DEVICE_ATTR_WO(s3_debug);
4468 
4469 #endif
4470 
4471 static int dm_early_init(void *handle)
4472 {
4473 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4474 
4475 	switch (adev->asic_type) {
4476 #if defined(CONFIG_DRM_AMD_DC_SI)
4477 	case CHIP_TAHITI:
4478 	case CHIP_PITCAIRN:
4479 	case CHIP_VERDE:
4480 		adev->mode_info.num_crtc = 6;
4481 		adev->mode_info.num_hpd = 6;
4482 		adev->mode_info.num_dig = 6;
4483 		break;
4484 	case CHIP_OLAND:
4485 		adev->mode_info.num_crtc = 2;
4486 		adev->mode_info.num_hpd = 2;
4487 		adev->mode_info.num_dig = 2;
4488 		break;
4489 #endif
4490 	case CHIP_BONAIRE:
4491 	case CHIP_HAWAII:
4492 		adev->mode_info.num_crtc = 6;
4493 		adev->mode_info.num_hpd = 6;
4494 		adev->mode_info.num_dig = 6;
4495 		break;
4496 	case CHIP_KAVERI:
4497 		adev->mode_info.num_crtc = 4;
4498 		adev->mode_info.num_hpd = 6;
4499 		adev->mode_info.num_dig = 7;
4500 		break;
4501 	case CHIP_KABINI:
4502 	case CHIP_MULLINS:
4503 		adev->mode_info.num_crtc = 2;
4504 		adev->mode_info.num_hpd = 6;
4505 		adev->mode_info.num_dig = 6;
4506 		break;
4507 	case CHIP_FIJI:
4508 	case CHIP_TONGA:
4509 		adev->mode_info.num_crtc = 6;
4510 		adev->mode_info.num_hpd = 6;
4511 		adev->mode_info.num_dig = 7;
4512 		break;
4513 	case CHIP_CARRIZO:
4514 		adev->mode_info.num_crtc = 3;
4515 		adev->mode_info.num_hpd = 6;
4516 		adev->mode_info.num_dig = 9;
4517 		break;
4518 	case CHIP_STONEY:
4519 		adev->mode_info.num_crtc = 2;
4520 		adev->mode_info.num_hpd = 6;
4521 		adev->mode_info.num_dig = 9;
4522 		break;
4523 	case CHIP_POLARIS11:
4524 	case CHIP_POLARIS12:
4525 		adev->mode_info.num_crtc = 5;
4526 		adev->mode_info.num_hpd = 5;
4527 		adev->mode_info.num_dig = 5;
4528 		break;
4529 	case CHIP_POLARIS10:
4530 	case CHIP_VEGAM:
4531 		adev->mode_info.num_crtc = 6;
4532 		adev->mode_info.num_hpd = 6;
4533 		adev->mode_info.num_dig = 6;
4534 		break;
4535 	case CHIP_VEGA10:
4536 	case CHIP_VEGA12:
4537 	case CHIP_VEGA20:
4538 		adev->mode_info.num_crtc = 6;
4539 		adev->mode_info.num_hpd = 6;
4540 		adev->mode_info.num_dig = 6;
4541 		break;
4542 	default:
4543 
4544 		switch (adev->ip_versions[DCE_HWIP][0]) {
4545 		case IP_VERSION(2, 0, 2):
4546 		case IP_VERSION(3, 0, 0):
4547 			adev->mode_info.num_crtc = 6;
4548 			adev->mode_info.num_hpd = 6;
4549 			adev->mode_info.num_dig = 6;
4550 			break;
4551 		case IP_VERSION(2, 0, 0):
4552 		case IP_VERSION(3, 0, 2):
4553 			adev->mode_info.num_crtc = 5;
4554 			adev->mode_info.num_hpd = 5;
4555 			adev->mode_info.num_dig = 5;
4556 			break;
4557 		case IP_VERSION(2, 0, 3):
4558 		case IP_VERSION(3, 0, 3):
4559 			adev->mode_info.num_crtc = 2;
4560 			adev->mode_info.num_hpd = 2;
4561 			adev->mode_info.num_dig = 2;
4562 			break;
4563 		case IP_VERSION(1, 0, 0):
4564 		case IP_VERSION(1, 0, 1):
4565 		case IP_VERSION(3, 0, 1):
4566 		case IP_VERSION(2, 1, 0):
4567 		case IP_VERSION(3, 1, 2):
4568 		case IP_VERSION(3, 1, 3):
4569 		case IP_VERSION(3, 1, 4):
4570 		case IP_VERSION(3, 1, 5):
4571 		case IP_VERSION(3, 1, 6):
4572 		case IP_VERSION(3, 2, 0):
4573 		case IP_VERSION(3, 2, 1):
4574 			adev->mode_info.num_crtc = 4;
4575 			adev->mode_info.num_hpd = 4;
4576 			adev->mode_info.num_dig = 4;
4577 			break;
4578 		default:
4579 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4580 					adev->ip_versions[DCE_HWIP][0]);
4581 			return -EINVAL;
4582 		}
4583 		break;
4584 	}
4585 
4586 	amdgpu_dm_set_irq_funcs(adev);
4587 
4588 	if (adev->mode_info.funcs == NULL)
4589 		adev->mode_info.funcs = &dm_display_funcs;
4590 
4591 	/*
4592 	 * Note: Do NOT change adev->audio_endpt_rreg and
4593 	 * adev->audio_endpt_wreg because they are initialised in
4594 	 * amdgpu_device_init()
4595 	 */
4596 #if defined(CONFIG_DEBUG_KERNEL_DC)
4597 	device_create_file(
4598 		adev_to_drm(adev)->dev,
4599 		&dev_attr_s3_debug);
4600 #endif
4601 
4602 	return 0;
4603 }
4604 
4605 static bool modereset_required(struct drm_crtc_state *crtc_state)
4606 {
4607 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4608 }
4609 
4610 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4611 {
4612 	drm_encoder_cleanup(encoder);
4613 	kfree(encoder);
4614 }
4615 
4616 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4617 	.destroy = amdgpu_dm_encoder_destroy,
4618 };
4619 
4620 static int
4621 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4622 			    const enum surface_pixel_format format,
4623 			    enum dc_color_space *color_space)
4624 {
4625 	bool full_range;
4626 
4627 	*color_space = COLOR_SPACE_SRGB;
4628 
4629 	/* DRM color properties only affect non-RGB formats. */
4630 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4631 		return 0;
4632 
4633 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4634 
4635 	switch (plane_state->color_encoding) {
4636 	case DRM_COLOR_YCBCR_BT601:
4637 		if (full_range)
4638 			*color_space = COLOR_SPACE_YCBCR601;
4639 		else
4640 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4641 		break;
4642 
4643 	case DRM_COLOR_YCBCR_BT709:
4644 		if (full_range)
4645 			*color_space = COLOR_SPACE_YCBCR709;
4646 		else
4647 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4648 		break;
4649 
4650 	case DRM_COLOR_YCBCR_BT2020:
4651 		if (full_range)
4652 			*color_space = COLOR_SPACE_2020_YCBCR;
4653 		else
4654 			return -EINVAL;
4655 		break;
4656 
4657 	default:
4658 		return -EINVAL;
4659 	}
4660 
4661 	return 0;
4662 }
4663 
4664 static int
4665 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4666 			    const struct drm_plane_state *plane_state,
4667 			    const uint64_t tiling_flags,
4668 			    struct dc_plane_info *plane_info,
4669 			    struct dc_plane_address *address,
4670 			    bool tmz_surface,
4671 			    bool force_disable_dcc)
4672 {
4673 	const struct drm_framebuffer *fb = plane_state->fb;
4674 	const struct amdgpu_framebuffer *afb =
4675 		to_amdgpu_framebuffer(plane_state->fb);
4676 	int ret;
4677 
4678 	memset(plane_info, 0, sizeof(*plane_info));
4679 
4680 	switch (fb->format->format) {
4681 	case DRM_FORMAT_C8:
4682 		plane_info->format =
4683 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4684 		break;
4685 	case DRM_FORMAT_RGB565:
4686 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4687 		break;
4688 	case DRM_FORMAT_XRGB8888:
4689 	case DRM_FORMAT_ARGB8888:
4690 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4691 		break;
4692 	case DRM_FORMAT_XRGB2101010:
4693 	case DRM_FORMAT_ARGB2101010:
4694 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4695 		break;
4696 	case DRM_FORMAT_XBGR2101010:
4697 	case DRM_FORMAT_ABGR2101010:
4698 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4699 		break;
4700 	case DRM_FORMAT_XBGR8888:
4701 	case DRM_FORMAT_ABGR8888:
4702 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4703 		break;
4704 	case DRM_FORMAT_NV21:
4705 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4706 		break;
4707 	case DRM_FORMAT_NV12:
4708 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4709 		break;
4710 	case DRM_FORMAT_P010:
4711 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4712 		break;
4713 	case DRM_FORMAT_XRGB16161616F:
4714 	case DRM_FORMAT_ARGB16161616F:
4715 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4716 		break;
4717 	case DRM_FORMAT_XBGR16161616F:
4718 	case DRM_FORMAT_ABGR16161616F:
4719 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4720 		break;
4721 	case DRM_FORMAT_XRGB16161616:
4722 	case DRM_FORMAT_ARGB16161616:
4723 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4724 		break;
4725 	case DRM_FORMAT_XBGR16161616:
4726 	case DRM_FORMAT_ABGR16161616:
4727 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4728 		break;
4729 	default:
4730 		DRM_ERROR(
4731 			"Unsupported screen format %p4cc\n",
4732 			&fb->format->format);
4733 		return -EINVAL;
4734 	}
4735 
4736 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4737 	case DRM_MODE_ROTATE_0:
4738 		plane_info->rotation = ROTATION_ANGLE_0;
4739 		break;
4740 	case DRM_MODE_ROTATE_90:
4741 		plane_info->rotation = ROTATION_ANGLE_90;
4742 		break;
4743 	case DRM_MODE_ROTATE_180:
4744 		plane_info->rotation = ROTATION_ANGLE_180;
4745 		break;
4746 	case DRM_MODE_ROTATE_270:
4747 		plane_info->rotation = ROTATION_ANGLE_270;
4748 		break;
4749 	default:
4750 		plane_info->rotation = ROTATION_ANGLE_0;
4751 		break;
4752 	}
4753 
4754 
4755 	plane_info->visible = true;
4756 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4757 
4758 	plane_info->layer_index = 0;
4759 
4760 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4761 					  &plane_info->color_space);
4762 	if (ret)
4763 		return ret;
4764 
4765 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4766 					   plane_info->rotation, tiling_flags,
4767 					   &plane_info->tiling_info,
4768 					   &plane_info->plane_size,
4769 					   &plane_info->dcc, address,
4770 					   tmz_surface, force_disable_dcc);
4771 	if (ret)
4772 		return ret;
4773 
4774 	fill_blending_from_plane_state(
4775 		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
4776 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4777 
4778 	return 0;
4779 }
4780 
4781 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4782 				    struct dc_plane_state *dc_plane_state,
4783 				    struct drm_plane_state *plane_state,
4784 				    struct drm_crtc_state *crtc_state)
4785 {
4786 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4787 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4788 	struct dc_scaling_info scaling_info;
4789 	struct dc_plane_info plane_info;
4790 	int ret;
4791 	bool force_disable_dcc = false;
4792 
4793 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
4794 	if (ret)
4795 		return ret;
4796 
4797 	dc_plane_state->src_rect = scaling_info.src_rect;
4798 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4799 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4800 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4801 
4802 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4803 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4804 					  afb->tiling_flags,
4805 					  &plane_info,
4806 					  &dc_plane_state->address,
4807 					  afb->tmz_surface,
4808 					  force_disable_dcc);
4809 	if (ret)
4810 		return ret;
4811 
4812 	dc_plane_state->format = plane_info.format;
4813 	dc_plane_state->color_space = plane_info.color_space;
4814 	dc_plane_state->format = plane_info.format;
4815 	dc_plane_state->plane_size = plane_info.plane_size;
4816 	dc_plane_state->rotation = plane_info.rotation;
4817 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4818 	dc_plane_state->stereo_format = plane_info.stereo_format;
4819 	dc_plane_state->tiling_info = plane_info.tiling_info;
4820 	dc_plane_state->visible = plane_info.visible;
4821 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4822 	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
4823 	dc_plane_state->global_alpha = plane_info.global_alpha;
4824 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4825 	dc_plane_state->dcc = plane_info.dcc;
4826 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4827 	dc_plane_state->flip_int_enabled = true;
4828 
4829 	/*
4830 	 * Always set input transfer function, since plane state is refreshed
4831 	 * every time.
4832 	 */
4833 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4834 	if (ret)
4835 		return ret;
4836 
4837 	return 0;
4838 }
4839 
4840 /**
4841  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
4842  *
4843  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
4844  *         remote fb
4845  * @old_plane_state: Old state of @plane
4846  * @new_plane_state: New state of @plane
4847  * @crtc_state: New state of CRTC connected to the @plane
4848  * @flip_addrs: DC flip tracking struct, which also tracts dirty rects
4849  *
4850  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
4851  * (referred to as "damage clips" in DRM nomenclature) that require updating on
4852  * the eDP remote buffer. The responsibility of specifying the dirty regions is
4853  * amdgpu_dm's.
4854  *
4855  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
4856  * plane with regions that require flushing to the eDP remote buffer. In
4857  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
4858  * implicitly provide damage clips without any client support via the plane
4859  * bounds.
4860  *
4861  * Today, amdgpu_dm only supports the MPO and cursor usecase.
4862  *
4863  * TODO: Also enable for FB_DAMAGE_CLIPS
4864  */
4865 static void fill_dc_dirty_rects(struct drm_plane *plane,
4866 				struct drm_plane_state *old_plane_state,
4867 				struct drm_plane_state *new_plane_state,
4868 				struct drm_crtc_state *crtc_state,
4869 				struct dc_flip_addrs *flip_addrs)
4870 {
4871 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4872 	struct rect *dirty_rects = flip_addrs->dirty_rects;
4873 	uint32_t num_clips;
4874 	bool bb_changed;
4875 	bool fb_changed;
4876 	uint32_t i = 0;
4877 
4878 	flip_addrs->dirty_rect_count = 0;
4879 
4880 	/*
4881 	 * Cursor plane has it's own dirty rect update interface. See
4882 	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
4883 	 */
4884 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
4885 		return;
4886 
4887 	/*
4888 	 * Today, we only consider MPO use-case for PSR SU. If MPO not
4889 	 * requested, and there is a plane update, do FFU.
4890 	 */
4891 	if (!dm_crtc_state->mpo_requested) {
4892 		dirty_rects[0].x = 0;
4893 		dirty_rects[0].y = 0;
4894 		dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
4895 		dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
4896 		flip_addrs->dirty_rect_count = 1;
4897 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
4898 				 new_plane_state->plane->base.id,
4899 				 dm_crtc_state->base.mode.crtc_hdisplay,
4900 				 dm_crtc_state->base.mode.crtc_vdisplay);
4901 		return;
4902 	}
4903 
4904 	/*
4905 	 * MPO is requested. Add entire plane bounding box to dirty rects if
4906 	 * flipped to or damaged.
4907 	 *
4908 	 * If plane is moved or resized, also add old bounding box to dirty
4909 	 * rects.
4910 	 */
4911 	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
4912 	fb_changed = old_plane_state->fb->base.id !=
4913 		     new_plane_state->fb->base.id;
4914 	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
4915 		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
4916 		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
4917 		      old_plane_state->crtc_h != new_plane_state->crtc_h);
4918 
4919 	DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
4920 			 new_plane_state->plane->base.id,
4921 			 bb_changed, fb_changed, num_clips);
4922 
4923 	if (num_clips || fb_changed || bb_changed) {
4924 		dirty_rects[i].x = new_plane_state->crtc_x;
4925 		dirty_rects[i].y = new_plane_state->crtc_y;
4926 		dirty_rects[i].width = new_plane_state->crtc_w;
4927 		dirty_rects[i].height = new_plane_state->crtc_h;
4928 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
4929 				 new_plane_state->plane->base.id,
4930 				 dirty_rects[i].x, dirty_rects[i].y,
4931 				 dirty_rects[i].width, dirty_rects[i].height);
4932 		i += 1;
4933 	}
4934 
4935 	/* Add old plane bounding-box if plane is moved or resized */
4936 	if (bb_changed) {
4937 		dirty_rects[i].x = old_plane_state->crtc_x;
4938 		dirty_rects[i].y = old_plane_state->crtc_y;
4939 		dirty_rects[i].width = old_plane_state->crtc_w;
4940 		dirty_rects[i].height = old_plane_state->crtc_h;
4941 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
4942 				old_plane_state->plane->base.id,
4943 				dirty_rects[i].x, dirty_rects[i].y,
4944 				dirty_rects[i].width, dirty_rects[i].height);
4945 		i += 1;
4946 	}
4947 
4948 	flip_addrs->dirty_rect_count = i;
4949 }
4950 
4951 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4952 					   const struct dm_connector_state *dm_state,
4953 					   struct dc_stream_state *stream)
4954 {
4955 	enum amdgpu_rmx_type rmx_type;
4956 
4957 	struct rect src = { 0 }; /* viewport in composition space*/
4958 	struct rect dst = { 0 }; /* stream addressable area */
4959 
4960 	/* no mode. nothing to be done */
4961 	if (!mode)
4962 		return;
4963 
4964 	/* Full screen scaling by default */
4965 	src.width = mode->hdisplay;
4966 	src.height = mode->vdisplay;
4967 	dst.width = stream->timing.h_addressable;
4968 	dst.height = stream->timing.v_addressable;
4969 
4970 	if (dm_state) {
4971 		rmx_type = dm_state->scaling;
4972 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4973 			if (src.width * dst.height <
4974 					src.height * dst.width) {
4975 				/* height needs less upscaling/more downscaling */
4976 				dst.width = src.width *
4977 						dst.height / src.height;
4978 			} else {
4979 				/* width needs less upscaling/more downscaling */
4980 				dst.height = src.height *
4981 						dst.width / src.width;
4982 			}
4983 		} else if (rmx_type == RMX_CENTER) {
4984 			dst = src;
4985 		}
4986 
4987 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4988 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4989 
4990 		if (dm_state->underscan_enable) {
4991 			dst.x += dm_state->underscan_hborder / 2;
4992 			dst.y += dm_state->underscan_vborder / 2;
4993 			dst.width -= dm_state->underscan_hborder;
4994 			dst.height -= dm_state->underscan_vborder;
4995 		}
4996 	}
4997 
4998 	stream->src = src;
4999 	stream->dst = dst;
5000 
5001 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5002 		      dst.x, dst.y, dst.width, dst.height);
5003 
5004 }
5005 
5006 static enum dc_color_depth
5007 convert_color_depth_from_display_info(const struct drm_connector *connector,
5008 				      bool is_y420, int requested_bpc)
5009 {
5010 	uint8_t bpc;
5011 
5012 	if (is_y420) {
5013 		bpc = 8;
5014 
5015 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5016 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5017 			bpc = 16;
5018 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5019 			bpc = 12;
5020 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5021 			bpc = 10;
5022 	} else {
5023 		bpc = (uint8_t)connector->display_info.bpc;
5024 		/* Assume 8 bpc by default if no bpc is specified. */
5025 		bpc = bpc ? bpc : 8;
5026 	}
5027 
5028 	if (requested_bpc > 0) {
5029 		/*
5030 		 * Cap display bpc based on the user requested value.
5031 		 *
5032 		 * The value for state->max_bpc may not correctly updated
5033 		 * depending on when the connector gets added to the state
5034 		 * or if this was called outside of atomic check, so it
5035 		 * can't be used directly.
5036 		 */
5037 		bpc = min_t(u8, bpc, requested_bpc);
5038 
5039 		/* Round down to the nearest even number. */
5040 		bpc = bpc - (bpc & 1);
5041 	}
5042 
5043 	switch (bpc) {
5044 	case 0:
5045 		/*
5046 		 * Temporary Work around, DRM doesn't parse color depth for
5047 		 * EDID revision before 1.4
5048 		 * TODO: Fix edid parsing
5049 		 */
5050 		return COLOR_DEPTH_888;
5051 	case 6:
5052 		return COLOR_DEPTH_666;
5053 	case 8:
5054 		return COLOR_DEPTH_888;
5055 	case 10:
5056 		return COLOR_DEPTH_101010;
5057 	case 12:
5058 		return COLOR_DEPTH_121212;
5059 	case 14:
5060 		return COLOR_DEPTH_141414;
5061 	case 16:
5062 		return COLOR_DEPTH_161616;
5063 	default:
5064 		return COLOR_DEPTH_UNDEFINED;
5065 	}
5066 }
5067 
5068 static enum dc_aspect_ratio
5069 get_aspect_ratio(const struct drm_display_mode *mode_in)
5070 {
5071 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5072 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5073 }
5074 
5075 static enum dc_color_space
5076 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5077 {
5078 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5079 
5080 	switch (dc_crtc_timing->pixel_encoding)	{
5081 	case PIXEL_ENCODING_YCBCR422:
5082 	case PIXEL_ENCODING_YCBCR444:
5083 	case PIXEL_ENCODING_YCBCR420:
5084 	{
5085 		/*
5086 		 * 27030khz is the separation point between HDTV and SDTV
5087 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5088 		 * respectively
5089 		 */
5090 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5091 			if (dc_crtc_timing->flags.Y_ONLY)
5092 				color_space =
5093 					COLOR_SPACE_YCBCR709_LIMITED;
5094 			else
5095 				color_space = COLOR_SPACE_YCBCR709;
5096 		} else {
5097 			if (dc_crtc_timing->flags.Y_ONLY)
5098 				color_space =
5099 					COLOR_SPACE_YCBCR601_LIMITED;
5100 			else
5101 				color_space = COLOR_SPACE_YCBCR601;
5102 		}
5103 
5104 	}
5105 	break;
5106 	case PIXEL_ENCODING_RGB:
5107 		color_space = COLOR_SPACE_SRGB;
5108 		break;
5109 
5110 	default:
5111 		WARN_ON(1);
5112 		break;
5113 	}
5114 
5115 	return color_space;
5116 }
5117 
5118 static bool adjust_colour_depth_from_display_info(
5119 	struct dc_crtc_timing *timing_out,
5120 	const struct drm_display_info *info)
5121 {
5122 	enum dc_color_depth depth = timing_out->display_color_depth;
5123 	int normalized_clk;
5124 	do {
5125 		normalized_clk = timing_out->pix_clk_100hz / 10;
5126 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5127 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5128 			normalized_clk /= 2;
5129 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5130 		switch (depth) {
5131 		case COLOR_DEPTH_888:
5132 			break;
5133 		case COLOR_DEPTH_101010:
5134 			normalized_clk = (normalized_clk * 30) / 24;
5135 			break;
5136 		case COLOR_DEPTH_121212:
5137 			normalized_clk = (normalized_clk * 36) / 24;
5138 			break;
5139 		case COLOR_DEPTH_161616:
5140 			normalized_clk = (normalized_clk * 48) / 24;
5141 			break;
5142 		default:
5143 			/* The above depths are the only ones valid for HDMI. */
5144 			return false;
5145 		}
5146 		if (normalized_clk <= info->max_tmds_clock) {
5147 			timing_out->display_color_depth = depth;
5148 			return true;
5149 		}
5150 	} while (--depth > COLOR_DEPTH_666);
5151 	return false;
5152 }
5153 
5154 static void fill_stream_properties_from_drm_display_mode(
5155 	struct dc_stream_state *stream,
5156 	const struct drm_display_mode *mode_in,
5157 	const struct drm_connector *connector,
5158 	const struct drm_connector_state *connector_state,
5159 	const struct dc_stream_state *old_stream,
5160 	int requested_bpc)
5161 {
5162 	struct dc_crtc_timing *timing_out = &stream->timing;
5163 	const struct drm_display_info *info = &connector->display_info;
5164 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5165 	struct hdmi_vendor_infoframe hv_frame;
5166 	struct hdmi_avi_infoframe avi_frame;
5167 
5168 	memset(&hv_frame, 0, sizeof(hv_frame));
5169 	memset(&avi_frame, 0, sizeof(avi_frame));
5170 
5171 	timing_out->h_border_left = 0;
5172 	timing_out->h_border_right = 0;
5173 	timing_out->v_border_top = 0;
5174 	timing_out->v_border_bottom = 0;
5175 	/* TODO: un-hardcode */
5176 	if (drm_mode_is_420_only(info, mode_in)
5177 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5178 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5179 	else if (drm_mode_is_420_also(info, mode_in)
5180 			&& aconnector->force_yuv420_output)
5181 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5182 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5183 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5184 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5185 	else
5186 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5187 
5188 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5189 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5190 		connector,
5191 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5192 		requested_bpc);
5193 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5194 	timing_out->hdmi_vic = 0;
5195 
5196 	if (old_stream) {
5197 		timing_out->vic = old_stream->timing.vic;
5198 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5199 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5200 	} else {
5201 		timing_out->vic = drm_match_cea_mode(mode_in);
5202 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5203 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5204 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5205 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5206 	}
5207 
5208 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5209 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5210 		timing_out->vic = avi_frame.video_code;
5211 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5212 		timing_out->hdmi_vic = hv_frame.vic;
5213 	}
5214 
5215 	if (is_freesync_video_mode(mode_in, aconnector)) {
5216 		timing_out->h_addressable = mode_in->hdisplay;
5217 		timing_out->h_total = mode_in->htotal;
5218 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5219 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5220 		timing_out->v_total = mode_in->vtotal;
5221 		timing_out->v_addressable = mode_in->vdisplay;
5222 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5223 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5224 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5225 	} else {
5226 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5227 		timing_out->h_total = mode_in->crtc_htotal;
5228 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5229 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5230 		timing_out->v_total = mode_in->crtc_vtotal;
5231 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5232 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5233 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5234 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5235 	}
5236 
5237 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5238 
5239 	stream->output_color_space = get_output_color_space(timing_out);
5240 
5241 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5242 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5243 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5244 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5245 		    drm_mode_is_420_also(info, mode_in) &&
5246 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5247 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5248 			adjust_colour_depth_from_display_info(timing_out, info);
5249 		}
5250 	}
5251 }
5252 
5253 static void fill_audio_info(struct audio_info *audio_info,
5254 			    const struct drm_connector *drm_connector,
5255 			    const struct dc_sink *dc_sink)
5256 {
5257 	int i = 0;
5258 	int cea_revision = 0;
5259 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5260 
5261 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5262 	audio_info->product_id = edid_caps->product_id;
5263 
5264 	cea_revision = drm_connector->display_info.cea_rev;
5265 
5266 	strscpy(audio_info->display_name,
5267 		edid_caps->display_name,
5268 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5269 
5270 	if (cea_revision >= 3) {
5271 		audio_info->mode_count = edid_caps->audio_mode_count;
5272 
5273 		for (i = 0; i < audio_info->mode_count; ++i) {
5274 			audio_info->modes[i].format_code =
5275 					(enum audio_format_code)
5276 					(edid_caps->audio_modes[i].format_code);
5277 			audio_info->modes[i].channel_count =
5278 					edid_caps->audio_modes[i].channel_count;
5279 			audio_info->modes[i].sample_rates.all =
5280 					edid_caps->audio_modes[i].sample_rate;
5281 			audio_info->modes[i].sample_size =
5282 					edid_caps->audio_modes[i].sample_size;
5283 		}
5284 	}
5285 
5286 	audio_info->flags.all = edid_caps->speaker_flags;
5287 
5288 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5289 	if (drm_connector->latency_present[0]) {
5290 		audio_info->video_latency = drm_connector->video_latency[0];
5291 		audio_info->audio_latency = drm_connector->audio_latency[0];
5292 	}
5293 
5294 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5295 
5296 }
5297 
5298 static void
5299 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5300 				      struct drm_display_mode *dst_mode)
5301 {
5302 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5303 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5304 	dst_mode->crtc_clock = src_mode->crtc_clock;
5305 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5306 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5307 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5308 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5309 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5310 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5311 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5312 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5313 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5314 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5315 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5316 }
5317 
5318 static void
5319 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5320 					const struct drm_display_mode *native_mode,
5321 					bool scale_enabled)
5322 {
5323 	if (scale_enabled) {
5324 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5325 	} else if (native_mode->clock == drm_mode->clock &&
5326 			native_mode->htotal == drm_mode->htotal &&
5327 			native_mode->vtotal == drm_mode->vtotal) {
5328 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5329 	} else {
5330 		/* no scaling nor amdgpu inserted, no need to patch */
5331 	}
5332 }
5333 
5334 static struct dc_sink *
5335 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5336 {
5337 	struct dc_sink_init_data sink_init_data = { 0 };
5338 	struct dc_sink *sink = NULL;
5339 	sink_init_data.link = aconnector->dc_link;
5340 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5341 
5342 	sink = dc_sink_create(&sink_init_data);
5343 	if (!sink) {
5344 		DRM_ERROR("Failed to create sink!\n");
5345 		return NULL;
5346 	}
5347 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5348 
5349 	return sink;
5350 }
5351 
5352 static void set_multisync_trigger_params(
5353 		struct dc_stream_state *stream)
5354 {
5355 	struct dc_stream_state *master = NULL;
5356 
5357 	if (stream->triggered_crtc_reset.enabled) {
5358 		master = stream->triggered_crtc_reset.event_source;
5359 		stream->triggered_crtc_reset.event =
5360 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5361 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5362 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5363 	}
5364 }
5365 
5366 static void set_master_stream(struct dc_stream_state *stream_set[],
5367 			      int stream_count)
5368 {
5369 	int j, highest_rfr = 0, master_stream = 0;
5370 
5371 	for (j = 0;  j < stream_count; j++) {
5372 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5373 			int refresh_rate = 0;
5374 
5375 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5376 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5377 			if (refresh_rate > highest_rfr) {
5378 				highest_rfr = refresh_rate;
5379 				master_stream = j;
5380 			}
5381 		}
5382 	}
5383 	for (j = 0;  j < stream_count; j++) {
5384 		if (stream_set[j])
5385 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5386 	}
5387 }
5388 
5389 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5390 {
5391 	int i = 0;
5392 	struct dc_stream_state *stream;
5393 
5394 	if (context->stream_count < 2)
5395 		return;
5396 	for (i = 0; i < context->stream_count ; i++) {
5397 		if (!context->streams[i])
5398 			continue;
5399 		/*
5400 		 * TODO: add a function to read AMD VSDB bits and set
5401 		 * crtc_sync_master.multi_sync_enabled flag
5402 		 * For now it's set to false
5403 		 */
5404 	}
5405 
5406 	set_master_stream(context->streams, context->stream_count);
5407 
5408 	for (i = 0; i < context->stream_count ; i++) {
5409 		stream = context->streams[i];
5410 
5411 		if (!stream)
5412 			continue;
5413 
5414 		set_multisync_trigger_params(stream);
5415 	}
5416 }
5417 
5418 /**
5419  * DOC: FreeSync Video
5420  *
5421  * When a userspace application wants to play a video, the content follows a
5422  * standard format definition that usually specifies the FPS for that format.
5423  * The below list illustrates some video format and the expected FPS,
5424  * respectively:
5425  *
5426  * - TV/NTSC (23.976 FPS)
5427  * - Cinema (24 FPS)
5428  * - TV/PAL (25 FPS)
5429  * - TV/NTSC (29.97 FPS)
5430  * - TV/NTSC (30 FPS)
5431  * - Cinema HFR (48 FPS)
5432  * - TV/PAL (50 FPS)
5433  * - Commonly used (60 FPS)
5434  * - Multiples of 24 (48,72,96 FPS)
5435  *
5436  * The list of standards video format is not huge and can be added to the
5437  * connector modeset list beforehand. With that, userspace can leverage
5438  * FreeSync to extends the front porch in order to attain the target refresh
5439  * rate. Such a switch will happen seamlessly, without screen blanking or
5440  * reprogramming of the output in any other way. If the userspace requests a
5441  * modesetting change compatible with FreeSync modes that only differ in the
5442  * refresh rate, DC will skip the full update and avoid blink during the
5443  * transition. For example, the video player can change the modesetting from
5444  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5445  * causing any display blink. This same concept can be applied to a mode
5446  * setting change.
5447  */
5448 static struct drm_display_mode *
5449 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5450 		bool use_probed_modes)
5451 {
5452 	struct drm_display_mode *m, *m_pref = NULL;
5453 	u16 current_refresh, highest_refresh;
5454 	struct list_head *list_head = use_probed_modes ?
5455 		&aconnector->base.probed_modes :
5456 		&aconnector->base.modes;
5457 
5458 	if (aconnector->freesync_vid_base.clock != 0)
5459 		return &aconnector->freesync_vid_base;
5460 
5461 	/* Find the preferred mode */
5462 	list_for_each_entry (m, list_head, head) {
5463 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5464 			m_pref = m;
5465 			break;
5466 		}
5467 	}
5468 
5469 	if (!m_pref) {
5470 		/* Probably an EDID with no preferred mode. Fallback to first entry */
5471 		m_pref = list_first_entry_or_null(
5472 				&aconnector->base.modes, struct drm_display_mode, head);
5473 		if (!m_pref) {
5474 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5475 			return NULL;
5476 		}
5477 	}
5478 
5479 	highest_refresh = drm_mode_vrefresh(m_pref);
5480 
5481 	/*
5482 	 * Find the mode with highest refresh rate with same resolution.
5483 	 * For some monitors, preferred mode is not the mode with highest
5484 	 * supported refresh rate.
5485 	 */
5486 	list_for_each_entry (m, list_head, head) {
5487 		current_refresh  = drm_mode_vrefresh(m);
5488 
5489 		if (m->hdisplay == m_pref->hdisplay &&
5490 		    m->vdisplay == m_pref->vdisplay &&
5491 		    highest_refresh < current_refresh) {
5492 			highest_refresh = current_refresh;
5493 			m_pref = m;
5494 		}
5495 	}
5496 
5497 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
5498 	return m_pref;
5499 }
5500 
5501 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5502 		struct amdgpu_dm_connector *aconnector)
5503 {
5504 	struct drm_display_mode *high_mode;
5505 	int timing_diff;
5506 
5507 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5508 	if (!high_mode || !mode)
5509 		return false;
5510 
5511 	timing_diff = high_mode->vtotal - mode->vtotal;
5512 
5513 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5514 	    high_mode->hdisplay != mode->hdisplay ||
5515 	    high_mode->vdisplay != mode->vdisplay ||
5516 	    high_mode->hsync_start != mode->hsync_start ||
5517 	    high_mode->hsync_end != mode->hsync_end ||
5518 	    high_mode->htotal != mode->htotal ||
5519 	    high_mode->hskew != mode->hskew ||
5520 	    high_mode->vscan != mode->vscan ||
5521 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5522 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5523 		return false;
5524 	else
5525 		return true;
5526 }
5527 
5528 #if defined(CONFIG_DRM_AMD_DC_DCN)
5529 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5530 			    struct dc_sink *sink, struct dc_stream_state *stream,
5531 			    struct dsc_dec_dpcd_caps *dsc_caps)
5532 {
5533 	stream->timing.flags.DSC = 0;
5534 	dsc_caps->is_dsc_supported = false;
5535 
5536 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
5537 	    sink->sink_signal == SIGNAL_TYPE_EDP)) {
5538 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
5539 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
5540 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5541 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5542 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5543 				dsc_caps);
5544 	}
5545 }
5546 
5547 
5548 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
5549 				    struct dc_sink *sink, struct dc_stream_state *stream,
5550 				    struct dsc_dec_dpcd_caps *dsc_caps,
5551 				    uint32_t max_dsc_target_bpp_limit_override)
5552 {
5553 	const struct dc_link_settings *verified_link_cap = NULL;
5554 	uint32_t link_bw_in_kbps;
5555 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
5556 	struct dc *dc = sink->ctx->dc;
5557 	struct dc_dsc_bw_range bw_range = {0};
5558 	struct dc_dsc_config dsc_cfg = {0};
5559 
5560 	verified_link_cap = dc_link_get_link_cap(stream->link);
5561 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
5562 	edp_min_bpp_x16 = 8 * 16;
5563 	edp_max_bpp_x16 = 8 * 16;
5564 
5565 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
5566 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
5567 
5568 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
5569 		edp_min_bpp_x16 = edp_max_bpp_x16;
5570 
5571 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
5572 				dc->debug.dsc_min_slice_height_override,
5573 				edp_min_bpp_x16, edp_max_bpp_x16,
5574 				dsc_caps,
5575 				&stream->timing,
5576 				&bw_range)) {
5577 
5578 		if (bw_range.max_kbps < link_bw_in_kbps) {
5579 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5580 					dsc_caps,
5581 					dc->debug.dsc_min_slice_height_override,
5582 					max_dsc_target_bpp_limit_override,
5583 					0,
5584 					&stream->timing,
5585 					&dsc_cfg)) {
5586 				stream->timing.dsc_cfg = dsc_cfg;
5587 				stream->timing.flags.DSC = 1;
5588 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
5589 			}
5590 			return;
5591 		}
5592 	}
5593 
5594 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5595 				dsc_caps,
5596 				dc->debug.dsc_min_slice_height_override,
5597 				max_dsc_target_bpp_limit_override,
5598 				link_bw_in_kbps,
5599 				&stream->timing,
5600 				&dsc_cfg)) {
5601 		stream->timing.dsc_cfg = dsc_cfg;
5602 		stream->timing.flags.DSC = 1;
5603 	}
5604 }
5605 
5606 
5607 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5608 					struct dc_sink *sink, struct dc_stream_state *stream,
5609 					struct dsc_dec_dpcd_caps *dsc_caps)
5610 {
5611 	struct drm_connector *drm_connector = &aconnector->base;
5612 	uint32_t link_bandwidth_kbps;
5613 	uint32_t max_dsc_target_bpp_limit_override = 0;
5614 	struct dc *dc = sink->ctx->dc;
5615 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
5616 	uint32_t dsc_max_supported_bw_in_kbps;
5617 
5618 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5619 							dc_link_get_link_cap(aconnector->dc_link));
5620 	if (stream->link && stream->link->local_sink)
5621 		max_dsc_target_bpp_limit_override =
5622 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
5623 
5624 	/* Set DSC policy according to dsc_clock_en */
5625 	dc_dsc_policy_set_enable_dsc_when_not_needed(
5626 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5627 
5628 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
5629 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
5630 
5631 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
5632 
5633 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5634 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
5635 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5636 						dsc_caps,
5637 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5638 						max_dsc_target_bpp_limit_override,
5639 						link_bandwidth_kbps,
5640 						&stream->timing,
5641 						&stream->timing.dsc_cfg)) {
5642 				stream->timing.flags.DSC = 1;
5643 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5644 			}
5645 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
5646 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
5647 			max_supported_bw_in_kbps = link_bandwidth_kbps;
5648 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
5649 
5650 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
5651 					max_supported_bw_in_kbps > 0 &&
5652 					dsc_max_supported_bw_in_kbps > 0)
5653 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5654 						dsc_caps,
5655 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5656 						max_dsc_target_bpp_limit_override,
5657 						dsc_max_supported_bw_in_kbps,
5658 						&stream->timing,
5659 						&stream->timing.dsc_cfg)) {
5660 					stream->timing.flags.DSC = 1;
5661 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
5662 									 __func__, drm_connector->name);
5663 				}
5664 		}
5665 	}
5666 
5667 	/* Overwrite the stream flag if DSC is enabled through debugfs */
5668 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5669 		stream->timing.flags.DSC = 1;
5670 
5671 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5672 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5673 
5674 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5675 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5676 
5677 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5678 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5679 }
5680 #endif /* CONFIG_DRM_AMD_DC_DCN */
5681 
5682 static struct dc_stream_state *
5683 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5684 		       const struct drm_display_mode *drm_mode,
5685 		       const struct dm_connector_state *dm_state,
5686 		       const struct dc_stream_state *old_stream,
5687 		       int requested_bpc)
5688 {
5689 	struct drm_display_mode *preferred_mode = NULL;
5690 	struct drm_connector *drm_connector;
5691 	const struct drm_connector_state *con_state =
5692 		dm_state ? &dm_state->base : NULL;
5693 	struct dc_stream_state *stream = NULL;
5694 	struct drm_display_mode mode = *drm_mode;
5695 	struct drm_display_mode saved_mode;
5696 	struct drm_display_mode *freesync_mode = NULL;
5697 	bool native_mode_found = false;
5698 	bool recalculate_timing = false;
5699 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5700 	int mode_refresh;
5701 	int preferred_refresh = 0;
5702 #if defined(CONFIG_DRM_AMD_DC_DCN)
5703 	struct dsc_dec_dpcd_caps dsc_caps;
5704 #endif
5705 
5706 	struct dc_sink *sink = NULL;
5707 
5708 	memset(&saved_mode, 0, sizeof(saved_mode));
5709 
5710 	if (aconnector == NULL) {
5711 		DRM_ERROR("aconnector is NULL!\n");
5712 		return stream;
5713 	}
5714 
5715 	drm_connector = &aconnector->base;
5716 
5717 	if (!aconnector->dc_sink) {
5718 		sink = create_fake_sink(aconnector);
5719 		if (!sink)
5720 			return stream;
5721 	} else {
5722 		sink = aconnector->dc_sink;
5723 		dc_sink_retain(sink);
5724 	}
5725 
5726 	stream = dc_create_stream_for_sink(sink);
5727 
5728 	if (stream == NULL) {
5729 		DRM_ERROR("Failed to create stream for sink!\n");
5730 		goto finish;
5731 	}
5732 
5733 	stream->dm_stream_context = aconnector;
5734 
5735 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5736 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5737 
5738 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5739 		/* Search for preferred mode */
5740 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5741 			native_mode_found = true;
5742 			break;
5743 		}
5744 	}
5745 	if (!native_mode_found)
5746 		preferred_mode = list_first_entry_or_null(
5747 				&aconnector->base.modes,
5748 				struct drm_display_mode,
5749 				head);
5750 
5751 	mode_refresh = drm_mode_vrefresh(&mode);
5752 
5753 	if (preferred_mode == NULL) {
5754 		/*
5755 		 * This may not be an error, the use case is when we have no
5756 		 * usermode calls to reset and set mode upon hotplug. In this
5757 		 * case, we call set mode ourselves to restore the previous mode
5758 		 * and the modelist may not be filled in in time.
5759 		 */
5760 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5761 	} else {
5762 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
5763 		if (recalculate_timing) {
5764 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5765 			drm_mode_copy(&saved_mode, &mode);
5766 			drm_mode_copy(&mode, freesync_mode);
5767 		} else {
5768 			decide_crtc_timing_for_drm_display_mode(
5769 					&mode, preferred_mode, scale);
5770 
5771 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
5772 		}
5773 	}
5774 
5775 	if (recalculate_timing)
5776 		drm_mode_set_crtcinfo(&saved_mode, 0);
5777 	else if (!dm_state)
5778 		drm_mode_set_crtcinfo(&mode, 0);
5779 
5780 	/*
5781 	* If scaling is enabled and refresh rate didn't change
5782 	* we copy the vic and polarities of the old timings
5783 	*/
5784 	if (!scale || mode_refresh != preferred_refresh)
5785 		fill_stream_properties_from_drm_display_mode(
5786 			stream, &mode, &aconnector->base, con_state, NULL,
5787 			requested_bpc);
5788 	else
5789 		fill_stream_properties_from_drm_display_mode(
5790 			stream, &mode, &aconnector->base, con_state, old_stream,
5791 			requested_bpc);
5792 
5793 #if defined(CONFIG_DRM_AMD_DC_DCN)
5794 	/* SST DSC determination policy */
5795 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5796 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5797 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5798 #endif
5799 
5800 	update_stream_scaling_settings(&mode, dm_state, stream);
5801 
5802 	fill_audio_info(
5803 		&stream->audio_info,
5804 		drm_connector,
5805 		sink);
5806 
5807 	update_stream_signal(stream, sink);
5808 
5809 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5810 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5811 
5812 	if (stream->link->psr_settings.psr_feature_enabled) {
5813 		//
5814 		// should decide stream support vsc sdp colorimetry capability
5815 		// before building vsc info packet
5816 		//
5817 		stream->use_vsc_sdp_for_colorimetry = false;
5818 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5819 			stream->use_vsc_sdp_for_colorimetry =
5820 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5821 		} else {
5822 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5823 				stream->use_vsc_sdp_for_colorimetry = true;
5824 		}
5825 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
5826 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
5827 
5828 	}
5829 finish:
5830 	dc_sink_release(sink);
5831 
5832 	return stream;
5833 }
5834 
5835 static enum drm_connector_status
5836 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5837 {
5838 	bool connected;
5839 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5840 
5841 	/*
5842 	 * Notes:
5843 	 * 1. This interface is NOT called in context of HPD irq.
5844 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5845 	 * makes it a bad place for *any* MST-related activity.
5846 	 */
5847 
5848 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5849 	    !aconnector->fake_enable)
5850 		connected = (aconnector->dc_sink != NULL);
5851 	else
5852 		connected = (aconnector->base.force == DRM_FORCE_ON ||
5853 				aconnector->base.force == DRM_FORCE_ON_DIGITAL);
5854 
5855 	update_subconnector_property(aconnector);
5856 
5857 	return (connected ? connector_status_connected :
5858 			connector_status_disconnected);
5859 }
5860 
5861 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5862 					    struct drm_connector_state *connector_state,
5863 					    struct drm_property *property,
5864 					    uint64_t val)
5865 {
5866 	struct drm_device *dev = connector->dev;
5867 	struct amdgpu_device *adev = drm_to_adev(dev);
5868 	struct dm_connector_state *dm_old_state =
5869 		to_dm_connector_state(connector->state);
5870 	struct dm_connector_state *dm_new_state =
5871 		to_dm_connector_state(connector_state);
5872 
5873 	int ret = -EINVAL;
5874 
5875 	if (property == dev->mode_config.scaling_mode_property) {
5876 		enum amdgpu_rmx_type rmx_type;
5877 
5878 		switch (val) {
5879 		case DRM_MODE_SCALE_CENTER:
5880 			rmx_type = RMX_CENTER;
5881 			break;
5882 		case DRM_MODE_SCALE_ASPECT:
5883 			rmx_type = RMX_ASPECT;
5884 			break;
5885 		case DRM_MODE_SCALE_FULLSCREEN:
5886 			rmx_type = RMX_FULL;
5887 			break;
5888 		case DRM_MODE_SCALE_NONE:
5889 		default:
5890 			rmx_type = RMX_OFF;
5891 			break;
5892 		}
5893 
5894 		if (dm_old_state->scaling == rmx_type)
5895 			return 0;
5896 
5897 		dm_new_state->scaling = rmx_type;
5898 		ret = 0;
5899 	} else if (property == adev->mode_info.underscan_hborder_property) {
5900 		dm_new_state->underscan_hborder = val;
5901 		ret = 0;
5902 	} else if (property == adev->mode_info.underscan_vborder_property) {
5903 		dm_new_state->underscan_vborder = val;
5904 		ret = 0;
5905 	} else if (property == adev->mode_info.underscan_property) {
5906 		dm_new_state->underscan_enable = val;
5907 		ret = 0;
5908 	} else if (property == adev->mode_info.abm_level_property) {
5909 		dm_new_state->abm_level = val;
5910 		ret = 0;
5911 	}
5912 
5913 	return ret;
5914 }
5915 
5916 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5917 					    const struct drm_connector_state *state,
5918 					    struct drm_property *property,
5919 					    uint64_t *val)
5920 {
5921 	struct drm_device *dev = connector->dev;
5922 	struct amdgpu_device *adev = drm_to_adev(dev);
5923 	struct dm_connector_state *dm_state =
5924 		to_dm_connector_state(state);
5925 	int ret = -EINVAL;
5926 
5927 	if (property == dev->mode_config.scaling_mode_property) {
5928 		switch (dm_state->scaling) {
5929 		case RMX_CENTER:
5930 			*val = DRM_MODE_SCALE_CENTER;
5931 			break;
5932 		case RMX_ASPECT:
5933 			*val = DRM_MODE_SCALE_ASPECT;
5934 			break;
5935 		case RMX_FULL:
5936 			*val = DRM_MODE_SCALE_FULLSCREEN;
5937 			break;
5938 		case RMX_OFF:
5939 		default:
5940 			*val = DRM_MODE_SCALE_NONE;
5941 			break;
5942 		}
5943 		ret = 0;
5944 	} else if (property == adev->mode_info.underscan_hborder_property) {
5945 		*val = dm_state->underscan_hborder;
5946 		ret = 0;
5947 	} else if (property == adev->mode_info.underscan_vborder_property) {
5948 		*val = dm_state->underscan_vborder;
5949 		ret = 0;
5950 	} else if (property == adev->mode_info.underscan_property) {
5951 		*val = dm_state->underscan_enable;
5952 		ret = 0;
5953 	} else if (property == adev->mode_info.abm_level_property) {
5954 		*val = dm_state->abm_level;
5955 		ret = 0;
5956 	}
5957 
5958 	return ret;
5959 }
5960 
5961 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5962 {
5963 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5964 
5965 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5966 }
5967 
5968 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5969 {
5970 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5971 	const struct dc_link *link = aconnector->dc_link;
5972 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5973 	struct amdgpu_display_manager *dm = &adev->dm;
5974 	int i;
5975 
5976 	/*
5977 	 * Call only if mst_mgr was initialized before since it's not done
5978 	 * for all connector types.
5979 	 */
5980 	if (aconnector->mst_mgr.dev)
5981 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5982 
5983 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5984 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5985 	for (i = 0; i < dm->num_of_edps; i++) {
5986 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
5987 			backlight_device_unregister(dm->backlight_dev[i]);
5988 			dm->backlight_dev[i] = NULL;
5989 		}
5990 	}
5991 #endif
5992 
5993 	if (aconnector->dc_em_sink)
5994 		dc_sink_release(aconnector->dc_em_sink);
5995 	aconnector->dc_em_sink = NULL;
5996 	if (aconnector->dc_sink)
5997 		dc_sink_release(aconnector->dc_sink);
5998 	aconnector->dc_sink = NULL;
5999 
6000 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6001 	drm_connector_unregister(connector);
6002 	drm_connector_cleanup(connector);
6003 	if (aconnector->i2c) {
6004 		i2c_del_adapter(&aconnector->i2c->base);
6005 		kfree(aconnector->i2c);
6006 	}
6007 	kfree(aconnector->dm_dp_aux.aux.name);
6008 
6009 	kfree(connector);
6010 }
6011 
6012 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6013 {
6014 	struct dm_connector_state *state =
6015 		to_dm_connector_state(connector->state);
6016 
6017 	if (connector->state)
6018 		__drm_atomic_helper_connector_destroy_state(connector->state);
6019 
6020 	kfree(state);
6021 
6022 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6023 
6024 	if (state) {
6025 		state->scaling = RMX_OFF;
6026 		state->underscan_enable = false;
6027 		state->underscan_hborder = 0;
6028 		state->underscan_vborder = 0;
6029 		state->base.max_requested_bpc = 8;
6030 		state->vcpi_slots = 0;
6031 		state->pbn = 0;
6032 
6033 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6034 			state->abm_level = amdgpu_dm_abm_level;
6035 
6036 		__drm_atomic_helper_connector_reset(connector, &state->base);
6037 	}
6038 }
6039 
6040 struct drm_connector_state *
6041 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6042 {
6043 	struct dm_connector_state *state =
6044 		to_dm_connector_state(connector->state);
6045 
6046 	struct dm_connector_state *new_state =
6047 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6048 
6049 	if (!new_state)
6050 		return NULL;
6051 
6052 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6053 
6054 	new_state->freesync_capable = state->freesync_capable;
6055 	new_state->abm_level = state->abm_level;
6056 	new_state->scaling = state->scaling;
6057 	new_state->underscan_enable = state->underscan_enable;
6058 	new_state->underscan_hborder = state->underscan_hborder;
6059 	new_state->underscan_vborder = state->underscan_vborder;
6060 	new_state->vcpi_slots = state->vcpi_slots;
6061 	new_state->pbn = state->pbn;
6062 	return &new_state->base;
6063 }
6064 
6065 static int
6066 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6067 {
6068 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6069 		to_amdgpu_dm_connector(connector);
6070 	int r;
6071 
6072 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6073 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6074 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6075 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6076 		if (r)
6077 			return r;
6078 	}
6079 
6080 #if defined(CONFIG_DEBUG_FS)
6081 	connector_debugfs_init(amdgpu_dm_connector);
6082 #endif
6083 
6084 	return 0;
6085 }
6086 
6087 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6088 	.reset = amdgpu_dm_connector_funcs_reset,
6089 	.detect = amdgpu_dm_connector_detect,
6090 	.fill_modes = drm_helper_probe_single_connector_modes,
6091 	.destroy = amdgpu_dm_connector_destroy,
6092 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6093 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6094 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6095 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6096 	.late_register = amdgpu_dm_connector_late_register,
6097 	.early_unregister = amdgpu_dm_connector_unregister
6098 };
6099 
6100 static int get_modes(struct drm_connector *connector)
6101 {
6102 	return amdgpu_dm_connector_get_modes(connector);
6103 }
6104 
6105 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6106 {
6107 	struct dc_sink_init_data init_params = {
6108 			.link = aconnector->dc_link,
6109 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6110 	};
6111 	struct edid *edid;
6112 
6113 	if (!aconnector->base.edid_blob_ptr) {
6114 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6115 				aconnector->base.name);
6116 
6117 		aconnector->base.force = DRM_FORCE_OFF;
6118 		aconnector->base.override_edid = false;
6119 		return;
6120 	}
6121 
6122 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6123 
6124 	aconnector->edid = edid;
6125 
6126 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6127 		aconnector->dc_link,
6128 		(uint8_t *)edid,
6129 		(edid->extensions + 1) * EDID_LENGTH,
6130 		&init_params);
6131 
6132 	if (aconnector->base.force == DRM_FORCE_ON) {
6133 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6134 		aconnector->dc_link->local_sink :
6135 		aconnector->dc_em_sink;
6136 		dc_sink_retain(aconnector->dc_sink);
6137 	}
6138 }
6139 
6140 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6141 {
6142 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6143 
6144 	/*
6145 	 * In case of headless boot with force on for DP managed connector
6146 	 * Those settings have to be != 0 to get initial modeset
6147 	 */
6148 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6149 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6150 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6151 	}
6152 
6153 
6154 	aconnector->base.override_edid = true;
6155 	create_eml_sink(aconnector);
6156 }
6157 
6158 struct dc_stream_state *
6159 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6160 				const struct drm_display_mode *drm_mode,
6161 				const struct dm_connector_state *dm_state,
6162 				const struct dc_stream_state *old_stream)
6163 {
6164 	struct drm_connector *connector = &aconnector->base;
6165 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6166 	struct dc_stream_state *stream;
6167 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6168 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6169 	enum dc_status dc_result = DC_OK;
6170 
6171 	do {
6172 		stream = create_stream_for_sink(aconnector, drm_mode,
6173 						dm_state, old_stream,
6174 						requested_bpc);
6175 		if (stream == NULL) {
6176 			DRM_ERROR("Failed to create stream for sink!\n");
6177 			break;
6178 		}
6179 
6180 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6181 		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
6182 			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
6183 
6184 		if (dc_result != DC_OK) {
6185 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6186 				      drm_mode->hdisplay,
6187 				      drm_mode->vdisplay,
6188 				      drm_mode->clock,
6189 				      dc_result,
6190 				      dc_status_to_str(dc_result));
6191 
6192 			dc_stream_release(stream);
6193 			stream = NULL;
6194 			requested_bpc -= 2; /* lower bpc to retry validation */
6195 		}
6196 
6197 	} while (stream == NULL && requested_bpc >= 6);
6198 
6199 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6200 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6201 
6202 		aconnector->force_yuv420_output = true;
6203 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6204 						dm_state, old_stream);
6205 		aconnector->force_yuv420_output = false;
6206 	}
6207 
6208 	return stream;
6209 }
6210 
6211 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6212 				   struct drm_display_mode *mode)
6213 {
6214 	int result = MODE_ERROR;
6215 	struct dc_sink *dc_sink;
6216 	/* TODO: Unhardcode stream count */
6217 	struct dc_stream_state *stream;
6218 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6219 
6220 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6221 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6222 		return result;
6223 
6224 	/*
6225 	 * Only run this the first time mode_valid is called to initilialize
6226 	 * EDID mgmt
6227 	 */
6228 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6229 		!aconnector->dc_em_sink)
6230 		handle_edid_mgmt(aconnector);
6231 
6232 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6233 
6234 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6235 				aconnector->base.force != DRM_FORCE_ON) {
6236 		DRM_ERROR("dc_sink is NULL!\n");
6237 		goto fail;
6238 	}
6239 
6240 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6241 	if (stream) {
6242 		dc_stream_release(stream);
6243 		result = MODE_OK;
6244 	}
6245 
6246 fail:
6247 	/* TODO: error handling*/
6248 	return result;
6249 }
6250 
6251 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6252 				struct dc_info_packet *out)
6253 {
6254 	struct hdmi_drm_infoframe frame;
6255 	unsigned char buf[30]; /* 26 + 4 */
6256 	ssize_t len;
6257 	int ret, i;
6258 
6259 	memset(out, 0, sizeof(*out));
6260 
6261 	if (!state->hdr_output_metadata)
6262 		return 0;
6263 
6264 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6265 	if (ret)
6266 		return ret;
6267 
6268 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6269 	if (len < 0)
6270 		return (int)len;
6271 
6272 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6273 	if (len != 30)
6274 		return -EINVAL;
6275 
6276 	/* Prepare the infopacket for DC. */
6277 	switch (state->connector->connector_type) {
6278 	case DRM_MODE_CONNECTOR_HDMIA:
6279 		out->hb0 = 0x87; /* type */
6280 		out->hb1 = 0x01; /* version */
6281 		out->hb2 = 0x1A; /* length */
6282 		out->sb[0] = buf[3]; /* checksum */
6283 		i = 1;
6284 		break;
6285 
6286 	case DRM_MODE_CONNECTOR_DisplayPort:
6287 	case DRM_MODE_CONNECTOR_eDP:
6288 		out->hb0 = 0x00; /* sdp id, zero */
6289 		out->hb1 = 0x87; /* type */
6290 		out->hb2 = 0x1D; /* payload len - 1 */
6291 		out->hb3 = (0x13 << 2); /* sdp version */
6292 		out->sb[0] = 0x01; /* version */
6293 		out->sb[1] = 0x1A; /* length */
6294 		i = 2;
6295 		break;
6296 
6297 	default:
6298 		return -EINVAL;
6299 	}
6300 
6301 	memcpy(&out->sb[i], &buf[4], 26);
6302 	out->valid = true;
6303 
6304 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6305 		       sizeof(out->sb), false);
6306 
6307 	return 0;
6308 }
6309 
6310 static int
6311 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6312 				 struct drm_atomic_state *state)
6313 {
6314 	struct drm_connector_state *new_con_state =
6315 		drm_atomic_get_new_connector_state(state, conn);
6316 	struct drm_connector_state *old_con_state =
6317 		drm_atomic_get_old_connector_state(state, conn);
6318 	struct drm_crtc *crtc = new_con_state->crtc;
6319 	struct drm_crtc_state *new_crtc_state;
6320 	int ret;
6321 
6322 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6323 
6324 	if (!crtc)
6325 		return 0;
6326 
6327 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6328 		struct dc_info_packet hdr_infopacket;
6329 
6330 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6331 		if (ret)
6332 			return ret;
6333 
6334 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6335 		if (IS_ERR(new_crtc_state))
6336 			return PTR_ERR(new_crtc_state);
6337 
6338 		/*
6339 		 * DC considers the stream backends changed if the
6340 		 * static metadata changes. Forcing the modeset also
6341 		 * gives a simple way for userspace to switch from
6342 		 * 8bpc to 10bpc when setting the metadata to enter
6343 		 * or exit HDR.
6344 		 *
6345 		 * Changing the static metadata after it's been
6346 		 * set is permissible, however. So only force a
6347 		 * modeset if we're entering or exiting HDR.
6348 		 */
6349 		new_crtc_state->mode_changed =
6350 			!old_con_state->hdr_output_metadata ||
6351 			!new_con_state->hdr_output_metadata;
6352 	}
6353 
6354 	return 0;
6355 }
6356 
6357 static const struct drm_connector_helper_funcs
6358 amdgpu_dm_connector_helper_funcs = {
6359 	/*
6360 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6361 	 * modes will be filtered by drm_mode_validate_size(), and those modes
6362 	 * are missing after user start lightdm. So we need to renew modes list.
6363 	 * in get_modes call back, not just return the modes count
6364 	 */
6365 	.get_modes = get_modes,
6366 	.mode_valid = amdgpu_dm_connector_mode_valid,
6367 	.atomic_check = amdgpu_dm_connector_atomic_check,
6368 };
6369 
6370 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6371 {
6372 
6373 }
6374 
6375 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6376 {
6377 	switch (display_color_depth) {
6378 	case COLOR_DEPTH_666:
6379 		return 6;
6380 	case COLOR_DEPTH_888:
6381 		return 8;
6382 	case COLOR_DEPTH_101010:
6383 		return 10;
6384 	case COLOR_DEPTH_121212:
6385 		return 12;
6386 	case COLOR_DEPTH_141414:
6387 		return 14;
6388 	case COLOR_DEPTH_161616:
6389 		return 16;
6390 	default:
6391 		break;
6392 	}
6393 	return 0;
6394 }
6395 
6396 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6397 					  struct drm_crtc_state *crtc_state,
6398 					  struct drm_connector_state *conn_state)
6399 {
6400 	struct drm_atomic_state *state = crtc_state->state;
6401 	struct drm_connector *connector = conn_state->connector;
6402 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6403 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6404 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6405 	struct drm_dp_mst_topology_mgr *mst_mgr;
6406 	struct drm_dp_mst_port *mst_port;
6407 	enum dc_color_depth color_depth;
6408 	int clock, bpp = 0;
6409 	bool is_y420 = false;
6410 
6411 	if (!aconnector->port || !aconnector->dc_sink)
6412 		return 0;
6413 
6414 	mst_port = aconnector->port;
6415 	mst_mgr = &aconnector->mst_port->mst_mgr;
6416 
6417 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6418 		return 0;
6419 
6420 	if (!state->duplicated) {
6421 		int max_bpc = conn_state->max_requested_bpc;
6422 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6423 			  aconnector->force_yuv420_output;
6424 		color_depth = convert_color_depth_from_display_info(connector,
6425 								    is_y420,
6426 								    max_bpc);
6427 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6428 		clock = adjusted_mode->clock;
6429 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6430 	}
6431 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6432 									   mst_mgr,
6433 									   mst_port,
6434 									   dm_new_connector_state->pbn,
6435 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6436 	if (dm_new_connector_state->vcpi_slots < 0) {
6437 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6438 		return dm_new_connector_state->vcpi_slots;
6439 	}
6440 	return 0;
6441 }
6442 
6443 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6444 	.disable = dm_encoder_helper_disable,
6445 	.atomic_check = dm_encoder_helper_atomic_check
6446 };
6447 
6448 #if defined(CONFIG_DRM_AMD_DC_DCN)
6449 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6450 					    struct dc_state *dc_state,
6451 					    struct dsc_mst_fairness_vars *vars)
6452 {
6453 	struct dc_stream_state *stream = NULL;
6454 	struct drm_connector *connector;
6455 	struct drm_connector_state *new_con_state;
6456 	struct amdgpu_dm_connector *aconnector;
6457 	struct dm_connector_state *dm_conn_state;
6458 	int i, j;
6459 	int vcpi, pbn_div, pbn, slot_num = 0;
6460 
6461 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6462 
6463 		aconnector = to_amdgpu_dm_connector(connector);
6464 
6465 		if (!aconnector->port)
6466 			continue;
6467 
6468 		if (!new_con_state || !new_con_state->crtc)
6469 			continue;
6470 
6471 		dm_conn_state = to_dm_connector_state(new_con_state);
6472 
6473 		for (j = 0; j < dc_state->stream_count; j++) {
6474 			stream = dc_state->streams[j];
6475 			if (!stream)
6476 				continue;
6477 
6478 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6479 				break;
6480 
6481 			stream = NULL;
6482 		}
6483 
6484 		if (!stream)
6485 			continue;
6486 
6487 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6488 		/* pbn is calculated by compute_mst_dsc_configs_for_state*/
6489 		for (j = 0; j < dc_state->stream_count; j++) {
6490 			if (vars[j].aconnector == aconnector) {
6491 				pbn = vars[j].pbn;
6492 				break;
6493 			}
6494 		}
6495 
6496 		if (j == dc_state->stream_count)
6497 			continue;
6498 
6499 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
6500 
6501 		if (stream->timing.flags.DSC != 1) {
6502 			dm_conn_state->pbn = pbn;
6503 			dm_conn_state->vcpi_slots = slot_num;
6504 
6505 			drm_dp_mst_atomic_enable_dsc(state,
6506 						     aconnector->port,
6507 						     dm_conn_state->pbn,
6508 						     0,
6509 						     false);
6510 			continue;
6511 		}
6512 
6513 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6514 						    aconnector->port,
6515 						    pbn, pbn_div,
6516 						    true);
6517 		if (vcpi < 0)
6518 			return vcpi;
6519 
6520 		dm_conn_state->pbn = pbn;
6521 		dm_conn_state->vcpi_slots = vcpi;
6522 	}
6523 	return 0;
6524 }
6525 #endif
6526 
6527 static int to_drm_connector_type(enum signal_type st)
6528 {
6529 	switch (st) {
6530 	case SIGNAL_TYPE_HDMI_TYPE_A:
6531 		return DRM_MODE_CONNECTOR_HDMIA;
6532 	case SIGNAL_TYPE_EDP:
6533 		return DRM_MODE_CONNECTOR_eDP;
6534 	case SIGNAL_TYPE_LVDS:
6535 		return DRM_MODE_CONNECTOR_LVDS;
6536 	case SIGNAL_TYPE_RGB:
6537 		return DRM_MODE_CONNECTOR_VGA;
6538 	case SIGNAL_TYPE_DISPLAY_PORT:
6539 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6540 		return DRM_MODE_CONNECTOR_DisplayPort;
6541 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6542 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6543 		return DRM_MODE_CONNECTOR_DVID;
6544 	case SIGNAL_TYPE_VIRTUAL:
6545 		return DRM_MODE_CONNECTOR_VIRTUAL;
6546 
6547 	default:
6548 		return DRM_MODE_CONNECTOR_Unknown;
6549 	}
6550 }
6551 
6552 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6553 {
6554 	struct drm_encoder *encoder;
6555 
6556 	/* There is only one encoder per connector */
6557 	drm_connector_for_each_possible_encoder(connector, encoder)
6558 		return encoder;
6559 
6560 	return NULL;
6561 }
6562 
6563 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6564 {
6565 	struct drm_encoder *encoder;
6566 	struct amdgpu_encoder *amdgpu_encoder;
6567 
6568 	encoder = amdgpu_dm_connector_to_encoder(connector);
6569 
6570 	if (encoder == NULL)
6571 		return;
6572 
6573 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6574 
6575 	amdgpu_encoder->native_mode.clock = 0;
6576 
6577 	if (!list_empty(&connector->probed_modes)) {
6578 		struct drm_display_mode *preferred_mode = NULL;
6579 
6580 		list_for_each_entry(preferred_mode,
6581 				    &connector->probed_modes,
6582 				    head) {
6583 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6584 				amdgpu_encoder->native_mode = *preferred_mode;
6585 
6586 			break;
6587 		}
6588 
6589 	}
6590 }
6591 
6592 static struct drm_display_mode *
6593 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6594 			     char *name,
6595 			     int hdisplay, int vdisplay)
6596 {
6597 	struct drm_device *dev = encoder->dev;
6598 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6599 	struct drm_display_mode *mode = NULL;
6600 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6601 
6602 	mode = drm_mode_duplicate(dev, native_mode);
6603 
6604 	if (mode == NULL)
6605 		return NULL;
6606 
6607 	mode->hdisplay = hdisplay;
6608 	mode->vdisplay = vdisplay;
6609 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6610 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6611 
6612 	return mode;
6613 
6614 }
6615 
6616 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6617 						 struct drm_connector *connector)
6618 {
6619 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6620 	struct drm_display_mode *mode = NULL;
6621 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6622 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6623 				to_amdgpu_dm_connector(connector);
6624 	int i;
6625 	int n;
6626 	struct mode_size {
6627 		char name[DRM_DISPLAY_MODE_LEN];
6628 		int w;
6629 		int h;
6630 	} common_modes[] = {
6631 		{  "640x480",  640,  480},
6632 		{  "800x600",  800,  600},
6633 		{ "1024x768", 1024,  768},
6634 		{ "1280x720", 1280,  720},
6635 		{ "1280x800", 1280,  800},
6636 		{"1280x1024", 1280, 1024},
6637 		{ "1440x900", 1440,  900},
6638 		{"1680x1050", 1680, 1050},
6639 		{"1600x1200", 1600, 1200},
6640 		{"1920x1080", 1920, 1080},
6641 		{"1920x1200", 1920, 1200}
6642 	};
6643 
6644 	n = ARRAY_SIZE(common_modes);
6645 
6646 	for (i = 0; i < n; i++) {
6647 		struct drm_display_mode *curmode = NULL;
6648 		bool mode_existed = false;
6649 
6650 		if (common_modes[i].w > native_mode->hdisplay ||
6651 		    common_modes[i].h > native_mode->vdisplay ||
6652 		   (common_modes[i].w == native_mode->hdisplay &&
6653 		    common_modes[i].h == native_mode->vdisplay))
6654 			continue;
6655 
6656 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6657 			if (common_modes[i].w == curmode->hdisplay &&
6658 			    common_modes[i].h == curmode->vdisplay) {
6659 				mode_existed = true;
6660 				break;
6661 			}
6662 		}
6663 
6664 		if (mode_existed)
6665 			continue;
6666 
6667 		mode = amdgpu_dm_create_common_mode(encoder,
6668 				common_modes[i].name, common_modes[i].w,
6669 				common_modes[i].h);
6670 		if (!mode)
6671 			continue;
6672 
6673 		drm_mode_probed_add(connector, mode);
6674 		amdgpu_dm_connector->num_modes++;
6675 	}
6676 }
6677 
6678 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
6679 {
6680 	struct drm_encoder *encoder;
6681 	struct amdgpu_encoder *amdgpu_encoder;
6682 	const struct drm_display_mode *native_mode;
6683 
6684 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
6685 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
6686 		return;
6687 
6688 	encoder = amdgpu_dm_connector_to_encoder(connector);
6689 	if (!encoder)
6690 		return;
6691 
6692 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6693 
6694 	native_mode = &amdgpu_encoder->native_mode;
6695 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
6696 		return;
6697 
6698 	drm_connector_set_panel_orientation_with_quirk(connector,
6699 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
6700 						       native_mode->hdisplay,
6701 						       native_mode->vdisplay);
6702 }
6703 
6704 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6705 					      struct edid *edid)
6706 {
6707 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6708 			to_amdgpu_dm_connector(connector);
6709 
6710 	if (edid) {
6711 		/* empty probed_modes */
6712 		INIT_LIST_HEAD(&connector->probed_modes);
6713 		amdgpu_dm_connector->num_modes =
6714 				drm_add_edid_modes(connector, edid);
6715 
6716 		/* sorting the probed modes before calling function
6717 		 * amdgpu_dm_get_native_mode() since EDID can have
6718 		 * more than one preferred mode. The modes that are
6719 		 * later in the probed mode list could be of higher
6720 		 * and preferred resolution. For example, 3840x2160
6721 		 * resolution in base EDID preferred timing and 4096x2160
6722 		 * preferred resolution in DID extension block later.
6723 		 */
6724 		drm_mode_sort(&connector->probed_modes);
6725 		amdgpu_dm_get_native_mode(connector);
6726 
6727 		/* Freesync capabilities are reset by calling
6728 		 * drm_add_edid_modes() and need to be
6729 		 * restored here.
6730 		 */
6731 		amdgpu_dm_update_freesync_caps(connector, edid);
6732 
6733 		amdgpu_set_panel_orientation(connector);
6734 	} else {
6735 		amdgpu_dm_connector->num_modes = 0;
6736 	}
6737 }
6738 
6739 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
6740 			      struct drm_display_mode *mode)
6741 {
6742 	struct drm_display_mode *m;
6743 
6744 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
6745 		if (drm_mode_equal(m, mode))
6746 			return true;
6747 	}
6748 
6749 	return false;
6750 }
6751 
6752 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
6753 {
6754 	const struct drm_display_mode *m;
6755 	struct drm_display_mode *new_mode;
6756 	uint i;
6757 	uint32_t new_modes_count = 0;
6758 
6759 	/* Standard FPS values
6760 	 *
6761 	 * 23.976       - TV/NTSC
6762 	 * 24 	        - Cinema
6763 	 * 25 	        - TV/PAL
6764 	 * 29.97        - TV/NTSC
6765 	 * 30 	        - TV/NTSC
6766 	 * 48 	        - Cinema HFR
6767 	 * 50 	        - TV/PAL
6768 	 * 60 	        - Commonly used
6769 	 * 48,72,96,120 - Multiples of 24
6770 	 */
6771 	static const uint32_t common_rates[] = {
6772 		23976, 24000, 25000, 29970, 30000,
6773 		48000, 50000, 60000, 72000, 96000, 120000
6774 	};
6775 
6776 	/*
6777 	 * Find mode with highest refresh rate with the same resolution
6778 	 * as the preferred mode. Some monitors report a preferred mode
6779 	 * with lower resolution than the highest refresh rate supported.
6780 	 */
6781 
6782 	m = get_highest_refresh_rate_mode(aconnector, true);
6783 	if (!m)
6784 		return 0;
6785 
6786 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
6787 		uint64_t target_vtotal, target_vtotal_diff;
6788 		uint64_t num, den;
6789 
6790 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
6791 			continue;
6792 
6793 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
6794 		    common_rates[i] > aconnector->max_vfreq * 1000)
6795 			continue;
6796 
6797 		num = (unsigned long long)m->clock * 1000 * 1000;
6798 		den = common_rates[i] * (unsigned long long)m->htotal;
6799 		target_vtotal = div_u64(num, den);
6800 		target_vtotal_diff = target_vtotal - m->vtotal;
6801 
6802 		/* Check for illegal modes */
6803 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
6804 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
6805 		    m->vtotal + target_vtotal_diff < m->vsync_end)
6806 			continue;
6807 
6808 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
6809 		if (!new_mode)
6810 			goto out;
6811 
6812 		new_mode->vtotal += (u16)target_vtotal_diff;
6813 		new_mode->vsync_start += (u16)target_vtotal_diff;
6814 		new_mode->vsync_end += (u16)target_vtotal_diff;
6815 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6816 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
6817 
6818 		if (!is_duplicate_mode(aconnector, new_mode)) {
6819 			drm_mode_probed_add(&aconnector->base, new_mode);
6820 			new_modes_count += 1;
6821 		} else
6822 			drm_mode_destroy(aconnector->base.dev, new_mode);
6823 	}
6824  out:
6825 	return new_modes_count;
6826 }
6827 
6828 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
6829 						   struct edid *edid)
6830 {
6831 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6832 		to_amdgpu_dm_connector(connector);
6833 
6834 	if (!edid)
6835 		return;
6836 
6837 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
6838 		amdgpu_dm_connector->num_modes +=
6839 			add_fs_modes(amdgpu_dm_connector);
6840 }
6841 
6842 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6843 {
6844 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6845 			to_amdgpu_dm_connector(connector);
6846 	struct drm_encoder *encoder;
6847 	struct edid *edid = amdgpu_dm_connector->edid;
6848 
6849 	encoder = amdgpu_dm_connector_to_encoder(connector);
6850 
6851 	if (!drm_edid_is_valid(edid)) {
6852 		amdgpu_dm_connector->num_modes =
6853 				drm_add_modes_noedid(connector, 640, 480);
6854 	} else {
6855 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6856 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6857 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
6858 	}
6859 	amdgpu_dm_fbc_init(connector);
6860 
6861 	return amdgpu_dm_connector->num_modes;
6862 }
6863 
6864 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6865 				     struct amdgpu_dm_connector *aconnector,
6866 				     int connector_type,
6867 				     struct dc_link *link,
6868 				     int link_index)
6869 {
6870 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6871 
6872 	/*
6873 	 * Some of the properties below require access to state, like bpc.
6874 	 * Allocate some default initial connector state with our reset helper.
6875 	 */
6876 	if (aconnector->base.funcs->reset)
6877 		aconnector->base.funcs->reset(&aconnector->base);
6878 
6879 	aconnector->connector_id = link_index;
6880 	aconnector->dc_link = link;
6881 	aconnector->base.interlace_allowed = false;
6882 	aconnector->base.doublescan_allowed = false;
6883 	aconnector->base.stereo_allowed = false;
6884 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6885 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6886 	aconnector->audio_inst = -1;
6887 	mutex_init(&aconnector->hpd_lock);
6888 
6889 	/*
6890 	 * configure support HPD hot plug connector_>polled default value is 0
6891 	 * which means HPD hot plug not supported
6892 	 */
6893 	switch (connector_type) {
6894 	case DRM_MODE_CONNECTOR_HDMIA:
6895 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6896 		aconnector->base.ycbcr_420_allowed =
6897 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6898 		break;
6899 	case DRM_MODE_CONNECTOR_DisplayPort:
6900 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6901 		link->link_enc = link_enc_cfg_get_link_enc(link);
6902 		ASSERT(link->link_enc);
6903 		if (link->link_enc)
6904 			aconnector->base.ycbcr_420_allowed =
6905 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6906 		break;
6907 	case DRM_MODE_CONNECTOR_DVID:
6908 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6909 		break;
6910 	default:
6911 		break;
6912 	}
6913 
6914 	drm_object_attach_property(&aconnector->base.base,
6915 				dm->ddev->mode_config.scaling_mode_property,
6916 				DRM_MODE_SCALE_NONE);
6917 
6918 	drm_object_attach_property(&aconnector->base.base,
6919 				adev->mode_info.underscan_property,
6920 				UNDERSCAN_OFF);
6921 	drm_object_attach_property(&aconnector->base.base,
6922 				adev->mode_info.underscan_hborder_property,
6923 				0);
6924 	drm_object_attach_property(&aconnector->base.base,
6925 				adev->mode_info.underscan_vborder_property,
6926 				0);
6927 
6928 	if (!aconnector->mst_port)
6929 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6930 
6931 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6932 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6933 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6934 
6935 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6936 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6937 		drm_object_attach_property(&aconnector->base.base,
6938 				adev->mode_info.abm_level_property, 0);
6939 	}
6940 
6941 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6942 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6943 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6944 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
6945 
6946 		if (!aconnector->mst_port)
6947 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6948 
6949 #ifdef CONFIG_DRM_AMD_DC_HDCP
6950 		if (adev->dm.hdcp_workqueue)
6951 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6952 #endif
6953 	}
6954 }
6955 
6956 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6957 			      struct i2c_msg *msgs, int num)
6958 {
6959 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6960 	struct ddc_service *ddc_service = i2c->ddc_service;
6961 	struct i2c_command cmd;
6962 	int i;
6963 	int result = -EIO;
6964 
6965 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6966 
6967 	if (!cmd.payloads)
6968 		return result;
6969 
6970 	cmd.number_of_payloads = num;
6971 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6972 	cmd.speed = 100;
6973 
6974 	for (i = 0; i < num; i++) {
6975 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6976 		cmd.payloads[i].address = msgs[i].addr;
6977 		cmd.payloads[i].length = msgs[i].len;
6978 		cmd.payloads[i].data = msgs[i].buf;
6979 	}
6980 
6981 	if (dc_submit_i2c(
6982 			ddc_service->ctx->dc,
6983 			ddc_service->link->link_index,
6984 			&cmd))
6985 		result = num;
6986 
6987 	kfree(cmd.payloads);
6988 	return result;
6989 }
6990 
6991 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6992 {
6993 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6994 }
6995 
6996 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6997 	.master_xfer = amdgpu_dm_i2c_xfer,
6998 	.functionality = amdgpu_dm_i2c_func,
6999 };
7000 
7001 static struct amdgpu_i2c_adapter *
7002 create_i2c(struct ddc_service *ddc_service,
7003 	   int link_index,
7004 	   int *res)
7005 {
7006 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7007 	struct amdgpu_i2c_adapter *i2c;
7008 
7009 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7010 	if (!i2c)
7011 		return NULL;
7012 	i2c->base.owner = THIS_MODULE;
7013 	i2c->base.class = I2C_CLASS_DDC;
7014 	i2c->base.dev.parent = &adev->pdev->dev;
7015 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7016 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7017 	i2c_set_adapdata(&i2c->base, i2c);
7018 	i2c->ddc_service = ddc_service;
7019 
7020 	return i2c;
7021 }
7022 
7023 
7024 /*
7025  * Note: this function assumes that dc_link_detect() was called for the
7026  * dc_link which will be represented by this aconnector.
7027  */
7028 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7029 				    struct amdgpu_dm_connector *aconnector,
7030 				    uint32_t link_index,
7031 				    struct amdgpu_encoder *aencoder)
7032 {
7033 	int res = 0;
7034 	int connector_type;
7035 	struct dc *dc = dm->dc;
7036 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7037 	struct amdgpu_i2c_adapter *i2c;
7038 
7039 	link->priv = aconnector;
7040 
7041 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7042 
7043 	i2c = create_i2c(link->ddc, link->link_index, &res);
7044 	if (!i2c) {
7045 		DRM_ERROR("Failed to create i2c adapter data\n");
7046 		return -ENOMEM;
7047 	}
7048 
7049 	aconnector->i2c = i2c;
7050 	res = i2c_add_adapter(&i2c->base);
7051 
7052 	if (res) {
7053 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7054 		goto out_free;
7055 	}
7056 
7057 	connector_type = to_drm_connector_type(link->connector_signal);
7058 
7059 	res = drm_connector_init_with_ddc(
7060 			dm->ddev,
7061 			&aconnector->base,
7062 			&amdgpu_dm_connector_funcs,
7063 			connector_type,
7064 			&i2c->base);
7065 
7066 	if (res) {
7067 		DRM_ERROR("connector_init failed\n");
7068 		aconnector->connector_id = -1;
7069 		goto out_free;
7070 	}
7071 
7072 	drm_connector_helper_add(
7073 			&aconnector->base,
7074 			&amdgpu_dm_connector_helper_funcs);
7075 
7076 	amdgpu_dm_connector_init_helper(
7077 		dm,
7078 		aconnector,
7079 		connector_type,
7080 		link,
7081 		link_index);
7082 
7083 	drm_connector_attach_encoder(
7084 		&aconnector->base, &aencoder->base);
7085 
7086 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7087 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7088 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7089 
7090 out_free:
7091 	if (res) {
7092 		kfree(i2c);
7093 		aconnector->i2c = NULL;
7094 	}
7095 	return res;
7096 }
7097 
7098 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7099 {
7100 	switch (adev->mode_info.num_crtc) {
7101 	case 1:
7102 		return 0x1;
7103 	case 2:
7104 		return 0x3;
7105 	case 3:
7106 		return 0x7;
7107 	case 4:
7108 		return 0xf;
7109 	case 5:
7110 		return 0x1f;
7111 	case 6:
7112 	default:
7113 		return 0x3f;
7114 	}
7115 }
7116 
7117 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7118 				  struct amdgpu_encoder *aencoder,
7119 				  uint32_t link_index)
7120 {
7121 	struct amdgpu_device *adev = drm_to_adev(dev);
7122 
7123 	int res = drm_encoder_init(dev,
7124 				   &aencoder->base,
7125 				   &amdgpu_dm_encoder_funcs,
7126 				   DRM_MODE_ENCODER_TMDS,
7127 				   NULL);
7128 
7129 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7130 
7131 	if (!res)
7132 		aencoder->encoder_id = link_index;
7133 	else
7134 		aencoder->encoder_id = -1;
7135 
7136 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7137 
7138 	return res;
7139 }
7140 
7141 static void manage_dm_interrupts(struct amdgpu_device *adev,
7142 				 struct amdgpu_crtc *acrtc,
7143 				 bool enable)
7144 {
7145 	/*
7146 	 * We have no guarantee that the frontend index maps to the same
7147 	 * backend index - some even map to more than one.
7148 	 *
7149 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7150 	 */
7151 	int irq_type =
7152 		amdgpu_display_crtc_idx_to_irq_type(
7153 			adev,
7154 			acrtc->crtc_id);
7155 
7156 	if (enable) {
7157 		drm_crtc_vblank_on(&acrtc->base);
7158 		amdgpu_irq_get(
7159 			adev,
7160 			&adev->pageflip_irq,
7161 			irq_type);
7162 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7163 		amdgpu_irq_get(
7164 			adev,
7165 			&adev->vline0_irq,
7166 			irq_type);
7167 #endif
7168 	} else {
7169 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7170 		amdgpu_irq_put(
7171 			adev,
7172 			&adev->vline0_irq,
7173 			irq_type);
7174 #endif
7175 		amdgpu_irq_put(
7176 			adev,
7177 			&adev->pageflip_irq,
7178 			irq_type);
7179 		drm_crtc_vblank_off(&acrtc->base);
7180 	}
7181 }
7182 
7183 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7184 				      struct amdgpu_crtc *acrtc)
7185 {
7186 	int irq_type =
7187 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7188 
7189 	/**
7190 	 * This reads the current state for the IRQ and force reapplies
7191 	 * the setting to hardware.
7192 	 */
7193 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7194 }
7195 
7196 static bool
7197 is_scaling_state_different(const struct dm_connector_state *dm_state,
7198 			   const struct dm_connector_state *old_dm_state)
7199 {
7200 	if (dm_state->scaling != old_dm_state->scaling)
7201 		return true;
7202 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7203 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7204 			return true;
7205 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7206 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7207 			return true;
7208 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7209 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7210 		return true;
7211 	return false;
7212 }
7213 
7214 #ifdef CONFIG_DRM_AMD_DC_HDCP
7215 static bool is_content_protection_different(struct drm_connector_state *state,
7216 					    const struct drm_connector_state *old_state,
7217 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7218 {
7219 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7220 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7221 
7222 	/* Handle: Type0/1 change */
7223 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7224 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7225 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7226 		return true;
7227 	}
7228 
7229 	/* CP is being re enabled, ignore this
7230 	 *
7231 	 * Handles:	ENABLED -> DESIRED
7232 	 */
7233 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7234 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7235 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7236 		return false;
7237 	}
7238 
7239 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7240 	 *
7241 	 * Handles:	UNDESIRED -> ENABLED
7242 	 */
7243 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7244 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7245 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7246 
7247 	/* Stream removed and re-enabled
7248 	 *
7249 	 * Can sometimes overlap with the HPD case,
7250 	 * thus set update_hdcp to false to avoid
7251 	 * setting HDCP multiple times.
7252 	 *
7253 	 * Handles:	DESIRED -> DESIRED (Special case)
7254 	 */
7255 	if (!(old_state->crtc && old_state->crtc->enabled) &&
7256 		state->crtc && state->crtc->enabled &&
7257 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7258 		dm_con_state->update_hdcp = false;
7259 		return true;
7260 	}
7261 
7262 	/* Hot-plug, headless s3, dpms
7263 	 *
7264 	 * Only start HDCP if the display is connected/enabled.
7265 	 * update_hdcp flag will be set to false until the next
7266 	 * HPD comes in.
7267 	 *
7268 	 * Handles:	DESIRED -> DESIRED (Special case)
7269 	 */
7270 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7271 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7272 		dm_con_state->update_hdcp = false;
7273 		return true;
7274 	}
7275 
7276 	/*
7277 	 * Handles:	UNDESIRED -> UNDESIRED
7278 	 *		DESIRED -> DESIRED
7279 	 *		ENABLED -> ENABLED
7280 	 */
7281 	if (old_state->content_protection == state->content_protection)
7282 		return false;
7283 
7284 	/*
7285 	 * Handles:	UNDESIRED -> DESIRED
7286 	 *		DESIRED -> UNDESIRED
7287 	 *		ENABLED -> UNDESIRED
7288 	 */
7289 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7290 		return true;
7291 
7292 	/*
7293 	 * Handles:	DESIRED -> ENABLED
7294 	 */
7295 	return false;
7296 }
7297 
7298 #endif
7299 static void remove_stream(struct amdgpu_device *adev,
7300 			  struct amdgpu_crtc *acrtc,
7301 			  struct dc_stream_state *stream)
7302 {
7303 	/* this is the update mode case */
7304 
7305 	acrtc->otg_inst = -1;
7306 	acrtc->enabled = false;
7307 }
7308 
7309 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7310 {
7311 
7312 	assert_spin_locked(&acrtc->base.dev->event_lock);
7313 	WARN_ON(acrtc->event);
7314 
7315 	acrtc->event = acrtc->base.state->event;
7316 
7317 	/* Set the flip status */
7318 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7319 
7320 	/* Mark this event as consumed */
7321 	acrtc->base.state->event = NULL;
7322 
7323 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7324 		     acrtc->crtc_id);
7325 }
7326 
7327 static void update_freesync_state_on_stream(
7328 	struct amdgpu_display_manager *dm,
7329 	struct dm_crtc_state *new_crtc_state,
7330 	struct dc_stream_state *new_stream,
7331 	struct dc_plane_state *surface,
7332 	u32 flip_timestamp_in_us)
7333 {
7334 	struct mod_vrr_params vrr_params;
7335 	struct dc_info_packet vrr_infopacket = {0};
7336 	struct amdgpu_device *adev = dm->adev;
7337 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7338 	unsigned long flags;
7339 	bool pack_sdp_v1_3 = false;
7340 
7341 	if (!new_stream)
7342 		return;
7343 
7344 	/*
7345 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7346 	 * For now it's sufficient to just guard against these conditions.
7347 	 */
7348 
7349 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7350 		return;
7351 
7352 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7353         vrr_params = acrtc->dm_irq_params.vrr_params;
7354 
7355 	if (surface) {
7356 		mod_freesync_handle_preflip(
7357 			dm->freesync_module,
7358 			surface,
7359 			new_stream,
7360 			flip_timestamp_in_us,
7361 			&vrr_params);
7362 
7363 		if (adev->family < AMDGPU_FAMILY_AI &&
7364 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7365 			mod_freesync_handle_v_update(dm->freesync_module,
7366 						     new_stream, &vrr_params);
7367 
7368 			/* Need to call this before the frame ends. */
7369 			dc_stream_adjust_vmin_vmax(dm->dc,
7370 						   new_crtc_state->stream,
7371 						   &vrr_params.adjust);
7372 		}
7373 	}
7374 
7375 	mod_freesync_build_vrr_infopacket(
7376 		dm->freesync_module,
7377 		new_stream,
7378 		&vrr_params,
7379 		PACKET_TYPE_VRR,
7380 		TRANSFER_FUNC_UNKNOWN,
7381 		&vrr_infopacket,
7382 		pack_sdp_v1_3);
7383 
7384 	new_crtc_state->freesync_timing_changed |=
7385 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7386 			&vrr_params.adjust,
7387 			sizeof(vrr_params.adjust)) != 0);
7388 
7389 	new_crtc_state->freesync_vrr_info_changed |=
7390 		(memcmp(&new_crtc_state->vrr_infopacket,
7391 			&vrr_infopacket,
7392 			sizeof(vrr_infopacket)) != 0);
7393 
7394 	acrtc->dm_irq_params.vrr_params = vrr_params;
7395 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7396 
7397 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7398 	new_stream->vrr_infopacket = vrr_infopacket;
7399 
7400 	if (new_crtc_state->freesync_vrr_info_changed)
7401 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7402 			      new_crtc_state->base.crtc->base.id,
7403 			      (int)new_crtc_state->base.vrr_enabled,
7404 			      (int)vrr_params.state);
7405 
7406 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7407 }
7408 
7409 static void update_stream_irq_parameters(
7410 	struct amdgpu_display_manager *dm,
7411 	struct dm_crtc_state *new_crtc_state)
7412 {
7413 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7414 	struct mod_vrr_params vrr_params;
7415 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7416 	struct amdgpu_device *adev = dm->adev;
7417 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7418 	unsigned long flags;
7419 
7420 	if (!new_stream)
7421 		return;
7422 
7423 	/*
7424 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7425 	 * For now it's sufficient to just guard against these conditions.
7426 	 */
7427 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7428 		return;
7429 
7430 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7431 	vrr_params = acrtc->dm_irq_params.vrr_params;
7432 
7433 	if (new_crtc_state->vrr_supported &&
7434 	    config.min_refresh_in_uhz &&
7435 	    config.max_refresh_in_uhz) {
7436 		/*
7437 		 * if freesync compatible mode was set, config.state will be set
7438 		 * in atomic check
7439 		 */
7440 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7441 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7442 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7443 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7444 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7445 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7446 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7447 		} else {
7448 			config.state = new_crtc_state->base.vrr_enabled ?
7449 						     VRR_STATE_ACTIVE_VARIABLE :
7450 						     VRR_STATE_INACTIVE;
7451 		}
7452 	} else {
7453 		config.state = VRR_STATE_UNSUPPORTED;
7454 	}
7455 
7456 	mod_freesync_build_vrr_params(dm->freesync_module,
7457 				      new_stream,
7458 				      &config, &vrr_params);
7459 
7460 	new_crtc_state->freesync_timing_changed |=
7461 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7462 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7463 
7464 	new_crtc_state->freesync_config = config;
7465 	/* Copy state for access from DM IRQ handler */
7466 	acrtc->dm_irq_params.freesync_config = config;
7467 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7468 	acrtc->dm_irq_params.vrr_params = vrr_params;
7469 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7470 }
7471 
7472 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7473 					    struct dm_crtc_state *new_state)
7474 {
7475 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7476 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7477 
7478 	if (!old_vrr_active && new_vrr_active) {
7479 		/* Transition VRR inactive -> active:
7480 		 * While VRR is active, we must not disable vblank irq, as a
7481 		 * reenable after disable would compute bogus vblank/pflip
7482 		 * timestamps if it likely happened inside display front-porch.
7483 		 *
7484 		 * We also need vupdate irq for the actual core vblank handling
7485 		 * at end of vblank.
7486 		 */
7487 		dm_set_vupdate_irq(new_state->base.crtc, true);
7488 		drm_crtc_vblank_get(new_state->base.crtc);
7489 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7490 				 __func__, new_state->base.crtc->base.id);
7491 	} else if (old_vrr_active && !new_vrr_active) {
7492 		/* Transition VRR active -> inactive:
7493 		 * Allow vblank irq disable again for fixed refresh rate.
7494 		 */
7495 		dm_set_vupdate_irq(new_state->base.crtc, false);
7496 		drm_crtc_vblank_put(new_state->base.crtc);
7497 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7498 				 __func__, new_state->base.crtc->base.id);
7499 	}
7500 }
7501 
7502 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7503 {
7504 	struct drm_plane *plane;
7505 	struct drm_plane_state *old_plane_state;
7506 	int i;
7507 
7508 	/*
7509 	 * TODO: Make this per-stream so we don't issue redundant updates for
7510 	 * commits with multiple streams.
7511 	 */
7512 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
7513 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7514 			handle_cursor_update(plane, old_plane_state);
7515 }
7516 
7517 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7518 				    struct dc_state *dc_state,
7519 				    struct drm_device *dev,
7520 				    struct amdgpu_display_manager *dm,
7521 				    struct drm_crtc *pcrtc,
7522 				    bool wait_for_vblank)
7523 {
7524 	uint32_t i;
7525 	uint64_t timestamp_ns;
7526 	struct drm_plane *plane;
7527 	struct drm_plane_state *old_plane_state, *new_plane_state;
7528 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7529 	struct drm_crtc_state *new_pcrtc_state =
7530 			drm_atomic_get_new_crtc_state(state, pcrtc);
7531 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7532 	struct dm_crtc_state *dm_old_crtc_state =
7533 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7534 	int planes_count = 0, vpos, hpos;
7535 	unsigned long flags;
7536 	uint32_t target_vblank, last_flip_vblank;
7537 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7538 	bool cursor_update = false;
7539 	bool pflip_present = false;
7540 	struct {
7541 		struct dc_surface_update surface_updates[MAX_SURFACES];
7542 		struct dc_plane_info plane_infos[MAX_SURFACES];
7543 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7544 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7545 		struct dc_stream_update stream_update;
7546 	} *bundle;
7547 
7548 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7549 
7550 	if (!bundle) {
7551 		dm_error("Failed to allocate update bundle\n");
7552 		goto cleanup;
7553 	}
7554 
7555 	/*
7556 	 * Disable the cursor first if we're disabling all the planes.
7557 	 * It'll remain on the screen after the planes are re-enabled
7558 	 * if we don't.
7559 	 */
7560 	if (acrtc_state->active_planes == 0)
7561 		amdgpu_dm_commit_cursors(state);
7562 
7563 	/* update planes when needed */
7564 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7565 		struct drm_crtc *crtc = new_plane_state->crtc;
7566 		struct drm_crtc_state *new_crtc_state;
7567 		struct drm_framebuffer *fb = new_plane_state->fb;
7568 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7569 		bool plane_needs_flip;
7570 		struct dc_plane_state *dc_plane;
7571 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7572 
7573 		/* Cursor plane is handled after stream updates */
7574 		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7575 			if ((fb && crtc == pcrtc) ||
7576 			    (old_plane_state->fb && old_plane_state->crtc == pcrtc))
7577 				cursor_update = true;
7578 
7579 			continue;
7580 		}
7581 
7582 		if (!fb || !crtc || pcrtc != crtc)
7583 			continue;
7584 
7585 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7586 		if (!new_crtc_state->active)
7587 			continue;
7588 
7589 		dc_plane = dm_new_plane_state->dc_state;
7590 
7591 		bundle->surface_updates[planes_count].surface = dc_plane;
7592 		if (new_pcrtc_state->color_mgmt_changed) {
7593 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7594 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7595 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7596 		}
7597 
7598 		fill_dc_scaling_info(dm->adev, new_plane_state,
7599 				     &bundle->scaling_infos[planes_count]);
7600 
7601 		bundle->surface_updates[planes_count].scaling_info =
7602 			&bundle->scaling_infos[planes_count];
7603 
7604 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7605 
7606 		pflip_present = pflip_present || plane_needs_flip;
7607 
7608 		if (!plane_needs_flip) {
7609 			planes_count += 1;
7610 			continue;
7611 		}
7612 
7613 		fill_dc_plane_info_and_addr(
7614 			dm->adev, new_plane_state,
7615 			afb->tiling_flags,
7616 			&bundle->plane_infos[planes_count],
7617 			&bundle->flip_addrs[planes_count].address,
7618 			afb->tmz_surface, false);
7619 
7620 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
7621 				 new_plane_state->plane->index,
7622 				 bundle->plane_infos[planes_count].dcc.enable);
7623 
7624 		bundle->surface_updates[planes_count].plane_info =
7625 			&bundle->plane_infos[planes_count];
7626 
7627 		fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
7628 				    new_crtc_state,
7629 				    &bundle->flip_addrs[planes_count]);
7630 
7631 		/*
7632 		 * Only allow immediate flips for fast updates that don't
7633 		 * change FB pitch, DCC state, rotation or mirroing.
7634 		 */
7635 		bundle->flip_addrs[planes_count].flip_immediate =
7636 			crtc->state->async_flip &&
7637 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7638 
7639 		timestamp_ns = ktime_get_ns();
7640 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7641 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7642 		bundle->surface_updates[planes_count].surface = dc_plane;
7643 
7644 		if (!bundle->surface_updates[planes_count].surface) {
7645 			DRM_ERROR("No surface for CRTC: id=%d\n",
7646 					acrtc_attach->crtc_id);
7647 			continue;
7648 		}
7649 
7650 		if (plane == pcrtc->primary)
7651 			update_freesync_state_on_stream(
7652 				dm,
7653 				acrtc_state,
7654 				acrtc_state->stream,
7655 				dc_plane,
7656 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7657 
7658 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
7659 				 __func__,
7660 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7661 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7662 
7663 		planes_count += 1;
7664 
7665 	}
7666 
7667 	if (pflip_present) {
7668 		if (!vrr_active) {
7669 			/* Use old throttling in non-vrr fixed refresh rate mode
7670 			 * to keep flip scheduling based on target vblank counts
7671 			 * working in a backwards compatible way, e.g., for
7672 			 * clients using the GLX_OML_sync_control extension or
7673 			 * DRI3/Present extension with defined target_msc.
7674 			 */
7675 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7676 		}
7677 		else {
7678 			/* For variable refresh rate mode only:
7679 			 * Get vblank of last completed flip to avoid > 1 vrr
7680 			 * flips per video frame by use of throttling, but allow
7681 			 * flip programming anywhere in the possibly large
7682 			 * variable vrr vblank interval for fine-grained flip
7683 			 * timing control and more opportunity to avoid stutter
7684 			 * on late submission of flips.
7685 			 */
7686 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7687 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7688 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7689 		}
7690 
7691 		target_vblank = last_flip_vblank + wait_for_vblank;
7692 
7693 		/*
7694 		 * Wait until we're out of the vertical blank period before the one
7695 		 * targeted by the flip
7696 		 */
7697 		while ((acrtc_attach->enabled &&
7698 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7699 							    0, &vpos, &hpos, NULL,
7700 							    NULL, &pcrtc->hwmode)
7701 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7702 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7703 			(int)(target_vblank -
7704 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7705 			usleep_range(1000, 1100);
7706 		}
7707 
7708 		/**
7709 		 * Prepare the flip event for the pageflip interrupt to handle.
7710 		 *
7711 		 * This only works in the case where we've already turned on the
7712 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7713 		 * from 0 -> n planes we have to skip a hardware generated event
7714 		 * and rely on sending it from software.
7715 		 */
7716 		if (acrtc_attach->base.state->event &&
7717 		    acrtc_state->active_planes > 0) {
7718 			drm_crtc_vblank_get(pcrtc);
7719 
7720 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7721 
7722 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7723 			prepare_flip_isr(acrtc_attach);
7724 
7725 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7726 		}
7727 
7728 		if (acrtc_state->stream) {
7729 			if (acrtc_state->freesync_vrr_info_changed)
7730 				bundle->stream_update.vrr_infopacket =
7731 					&acrtc_state->stream->vrr_infopacket;
7732 		}
7733 	} else if (cursor_update && acrtc_state->active_planes > 0 &&
7734 		   acrtc_attach->base.state->event) {
7735 		drm_crtc_vblank_get(pcrtc);
7736 
7737 		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7738 
7739 		acrtc_attach->event = acrtc_attach->base.state->event;
7740 		acrtc_attach->base.state->event = NULL;
7741 
7742 		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7743 	}
7744 
7745 	/* Update the planes if changed or disable if we don't have any. */
7746 	if ((planes_count || acrtc_state->active_planes == 0) &&
7747 		acrtc_state->stream) {
7748 		/*
7749 		 * If PSR or idle optimizations are enabled then flush out
7750 		 * any pending work before hardware programming.
7751 		 */
7752 		if (dm->vblank_control_workqueue)
7753 			flush_workqueue(dm->vblank_control_workqueue);
7754 
7755 		bundle->stream_update.stream = acrtc_state->stream;
7756 		if (new_pcrtc_state->mode_changed) {
7757 			bundle->stream_update.src = acrtc_state->stream->src;
7758 			bundle->stream_update.dst = acrtc_state->stream->dst;
7759 		}
7760 
7761 		if (new_pcrtc_state->color_mgmt_changed) {
7762 			/*
7763 			 * TODO: This isn't fully correct since we've actually
7764 			 * already modified the stream in place.
7765 			 */
7766 			bundle->stream_update.gamut_remap =
7767 				&acrtc_state->stream->gamut_remap_matrix;
7768 			bundle->stream_update.output_csc_transform =
7769 				&acrtc_state->stream->csc_color_matrix;
7770 			bundle->stream_update.out_transfer_func =
7771 				acrtc_state->stream->out_transfer_func;
7772 		}
7773 
7774 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7775 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7776 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7777 
7778 		/*
7779 		 * If FreeSync state on the stream has changed then we need to
7780 		 * re-adjust the min/max bounds now that DC doesn't handle this
7781 		 * as part of commit.
7782 		 */
7783 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
7784 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7785 			dc_stream_adjust_vmin_vmax(
7786 				dm->dc, acrtc_state->stream,
7787 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7788 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7789 		}
7790 		mutex_lock(&dm->dc_lock);
7791 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7792 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7793 			amdgpu_dm_psr_disable(acrtc_state->stream);
7794 
7795 		dc_commit_updates_for_stream(dm->dc,
7796 						     bundle->surface_updates,
7797 						     planes_count,
7798 						     acrtc_state->stream,
7799 						     &bundle->stream_update,
7800 						     dc_state);
7801 
7802 		/**
7803 		 * Enable or disable the interrupts on the backend.
7804 		 *
7805 		 * Most pipes are put into power gating when unused.
7806 		 *
7807 		 * When power gating is enabled on a pipe we lose the
7808 		 * interrupt enablement state when power gating is disabled.
7809 		 *
7810 		 * So we need to update the IRQ control state in hardware
7811 		 * whenever the pipe turns on (since it could be previously
7812 		 * power gated) or off (since some pipes can't be power gated
7813 		 * on some ASICs).
7814 		 */
7815 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7816 			dm_update_pflip_irq_state(drm_to_adev(dev),
7817 						  acrtc_attach);
7818 
7819 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7820 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7821 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7822 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7823 
7824 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
7825 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
7826 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
7827 			struct amdgpu_dm_connector *aconn =
7828 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
7829 
7830 			if (aconn->psr_skip_count > 0)
7831 				aconn->psr_skip_count--;
7832 
7833 			/* Allow PSR when skip count is 0. */
7834 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
7835 
7836 			/*
7837 			 * If sink supports PSR SU, there is no need to rely on
7838 			 * a vblank event disable request to enable PSR. PSR SU
7839 			 * can be enabled immediately once OS demonstrates an
7840 			 * adequate number of fast atomic commits to notify KMD
7841 			 * of update events. See `vblank_control_worker()`.
7842 			 */
7843 			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
7844 			    acrtc_attach->dm_irq_params.allow_psr_entry &&
7845 			    !acrtc_state->stream->link->psr_settings.psr_allow_active)
7846 				amdgpu_dm_psr_enable(acrtc_state->stream);
7847 		} else {
7848 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
7849 		}
7850 
7851 		mutex_unlock(&dm->dc_lock);
7852 	}
7853 
7854 	/*
7855 	 * Update cursor state *after* programming all the planes.
7856 	 * This avoids redundant programming in the case where we're going
7857 	 * to be disabling a single plane - those pipes are being disabled.
7858 	 */
7859 	if (acrtc_state->active_planes)
7860 		amdgpu_dm_commit_cursors(state);
7861 
7862 cleanup:
7863 	kfree(bundle);
7864 }
7865 
7866 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7867 				   struct drm_atomic_state *state)
7868 {
7869 	struct amdgpu_device *adev = drm_to_adev(dev);
7870 	struct amdgpu_dm_connector *aconnector;
7871 	struct drm_connector *connector;
7872 	struct drm_connector_state *old_con_state, *new_con_state;
7873 	struct drm_crtc_state *new_crtc_state;
7874 	struct dm_crtc_state *new_dm_crtc_state;
7875 	const struct dc_stream_status *status;
7876 	int i, inst;
7877 
7878 	/* Notify device removals. */
7879 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7880 		if (old_con_state->crtc != new_con_state->crtc) {
7881 			/* CRTC changes require notification. */
7882 			goto notify;
7883 		}
7884 
7885 		if (!new_con_state->crtc)
7886 			continue;
7887 
7888 		new_crtc_state = drm_atomic_get_new_crtc_state(
7889 			state, new_con_state->crtc);
7890 
7891 		if (!new_crtc_state)
7892 			continue;
7893 
7894 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7895 			continue;
7896 
7897 	notify:
7898 		aconnector = to_amdgpu_dm_connector(connector);
7899 
7900 		mutex_lock(&adev->dm.audio_lock);
7901 		inst = aconnector->audio_inst;
7902 		aconnector->audio_inst = -1;
7903 		mutex_unlock(&adev->dm.audio_lock);
7904 
7905 		amdgpu_dm_audio_eld_notify(adev, inst);
7906 	}
7907 
7908 	/* Notify audio device additions. */
7909 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7910 		if (!new_con_state->crtc)
7911 			continue;
7912 
7913 		new_crtc_state = drm_atomic_get_new_crtc_state(
7914 			state, new_con_state->crtc);
7915 
7916 		if (!new_crtc_state)
7917 			continue;
7918 
7919 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7920 			continue;
7921 
7922 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7923 		if (!new_dm_crtc_state->stream)
7924 			continue;
7925 
7926 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7927 		if (!status)
7928 			continue;
7929 
7930 		aconnector = to_amdgpu_dm_connector(connector);
7931 
7932 		mutex_lock(&adev->dm.audio_lock);
7933 		inst = status->audio_inst;
7934 		aconnector->audio_inst = inst;
7935 		mutex_unlock(&adev->dm.audio_lock);
7936 
7937 		amdgpu_dm_audio_eld_notify(adev, inst);
7938 	}
7939 }
7940 
7941 /*
7942  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7943  * @crtc_state: the DRM CRTC state
7944  * @stream_state: the DC stream state.
7945  *
7946  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7947  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7948  */
7949 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7950 						struct dc_stream_state *stream_state)
7951 {
7952 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7953 }
7954 
7955 /**
7956  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7957  * @state: The atomic state to commit
7958  *
7959  * This will tell DC to commit the constructed DC state from atomic_check,
7960  * programming the hardware. Any failures here implies a hardware failure, since
7961  * atomic check should have filtered anything non-kosher.
7962  */
7963 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7964 {
7965 	struct drm_device *dev = state->dev;
7966 	struct amdgpu_device *adev = drm_to_adev(dev);
7967 	struct amdgpu_display_manager *dm = &adev->dm;
7968 	struct dm_atomic_state *dm_state;
7969 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7970 	uint32_t i, j;
7971 	struct drm_crtc *crtc;
7972 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7973 	unsigned long flags;
7974 	bool wait_for_vblank = true;
7975 	struct drm_connector *connector;
7976 	struct drm_connector_state *old_con_state, *new_con_state;
7977 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7978 	int crtc_disable_count = 0;
7979 	bool mode_set_reset_required = false;
7980 	int r;
7981 
7982 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
7983 
7984 	r = drm_atomic_helper_wait_for_fences(dev, state, false);
7985 	if (unlikely(r))
7986 		DRM_ERROR("Waiting for fences timed out!");
7987 
7988 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7989 
7990 	dm_state = dm_atomic_get_new_state(state);
7991 	if (dm_state && dm_state->context) {
7992 		dc_state = dm_state->context;
7993 	} else {
7994 		/* No state changes, retain current state. */
7995 		dc_state_temp = dc_create_state(dm->dc);
7996 		ASSERT(dc_state_temp);
7997 		dc_state = dc_state_temp;
7998 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7999 	}
8000 
8001 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8002 				       new_crtc_state, i) {
8003 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8004 
8005 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8006 
8007 		if (old_crtc_state->active &&
8008 		    (!new_crtc_state->active ||
8009 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8010 			manage_dm_interrupts(adev, acrtc, false);
8011 			dc_stream_release(dm_old_crtc_state->stream);
8012 		}
8013 	}
8014 
8015 	drm_atomic_helper_calc_timestamping_constants(state);
8016 
8017 	/* update changed items */
8018 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8019 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8020 
8021 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8022 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8023 
8024 		drm_dbg_state(state->dev,
8025 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8026 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8027 			"connectors_changed:%d\n",
8028 			acrtc->crtc_id,
8029 			new_crtc_state->enable,
8030 			new_crtc_state->active,
8031 			new_crtc_state->planes_changed,
8032 			new_crtc_state->mode_changed,
8033 			new_crtc_state->active_changed,
8034 			new_crtc_state->connectors_changed);
8035 
8036 		/* Disable cursor if disabling crtc */
8037 		if (old_crtc_state->active && !new_crtc_state->active) {
8038 			struct dc_cursor_position position;
8039 
8040 			memset(&position, 0, sizeof(position));
8041 			mutex_lock(&dm->dc_lock);
8042 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8043 			mutex_unlock(&dm->dc_lock);
8044 		}
8045 
8046 		/* Copy all transient state flags into dc state */
8047 		if (dm_new_crtc_state->stream) {
8048 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8049 							    dm_new_crtc_state->stream);
8050 		}
8051 
8052 		/* handles headless hotplug case, updating new_state and
8053 		 * aconnector as needed
8054 		 */
8055 
8056 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8057 
8058 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8059 
8060 			if (!dm_new_crtc_state->stream) {
8061 				/*
8062 				 * this could happen because of issues with
8063 				 * userspace notifications delivery.
8064 				 * In this case userspace tries to set mode on
8065 				 * display which is disconnected in fact.
8066 				 * dc_sink is NULL in this case on aconnector.
8067 				 * We expect reset mode will come soon.
8068 				 *
8069 				 * This can also happen when unplug is done
8070 				 * during resume sequence ended
8071 				 *
8072 				 * In this case, we want to pretend we still
8073 				 * have a sink to keep the pipe running so that
8074 				 * hw state is consistent with the sw state
8075 				 */
8076 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8077 						__func__, acrtc->base.base.id);
8078 				continue;
8079 			}
8080 
8081 			if (dm_old_crtc_state->stream)
8082 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8083 
8084 			pm_runtime_get_noresume(dev->dev);
8085 
8086 			acrtc->enabled = true;
8087 			acrtc->hw_mode = new_crtc_state->mode;
8088 			crtc->hwmode = new_crtc_state->mode;
8089 			mode_set_reset_required = true;
8090 		} else if (modereset_required(new_crtc_state)) {
8091 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8092 			/* i.e. reset mode */
8093 			if (dm_old_crtc_state->stream)
8094 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8095 
8096 			mode_set_reset_required = true;
8097 		}
8098 	} /* for_each_crtc_in_state() */
8099 
8100 	if (dc_state) {
8101 		/* if there mode set or reset, disable eDP PSR */
8102 		if (mode_set_reset_required) {
8103 			if (dm->vblank_control_workqueue)
8104 				flush_workqueue(dm->vblank_control_workqueue);
8105 
8106 			amdgpu_dm_psr_disable_all(dm);
8107 		}
8108 
8109 		dm_enable_per_frame_crtc_master_sync(dc_state);
8110 		mutex_lock(&dm->dc_lock);
8111 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8112 
8113 		/* Allow idle optimization when vblank count is 0 for display off */
8114 		if (dm->active_vblank_irq_count == 0)
8115 			dc_allow_idle_optimizations(dm->dc, true);
8116 		mutex_unlock(&dm->dc_lock);
8117 	}
8118 
8119 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8120 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8121 
8122 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8123 
8124 		if (dm_new_crtc_state->stream != NULL) {
8125 			const struct dc_stream_status *status =
8126 					dc_stream_get_status(dm_new_crtc_state->stream);
8127 
8128 			if (!status)
8129 				status = dc_stream_get_status_from_state(dc_state,
8130 									 dm_new_crtc_state->stream);
8131 			if (!status)
8132 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8133 			else
8134 				acrtc->otg_inst = status->primary_otg_inst;
8135 		}
8136 	}
8137 #ifdef CONFIG_DRM_AMD_DC_HDCP
8138 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8139 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8140 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8141 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8142 
8143 		new_crtc_state = NULL;
8144 
8145 		if (acrtc)
8146 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8147 
8148 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8149 
8150 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8151 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8152 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8153 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8154 			dm_new_con_state->update_hdcp = true;
8155 			continue;
8156 		}
8157 
8158 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8159 			hdcp_update_display(
8160 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8161 				new_con_state->hdcp_content_type,
8162 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8163 	}
8164 #endif
8165 
8166 	/* Handle connector state changes */
8167 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8168 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8169 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8170 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8171 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8172 		struct dc_stream_update stream_update;
8173 		struct dc_info_packet hdr_packet;
8174 		struct dc_stream_status *status = NULL;
8175 		bool abm_changed, hdr_changed, scaling_changed;
8176 
8177 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8178 		memset(&stream_update, 0, sizeof(stream_update));
8179 
8180 		if (acrtc) {
8181 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8182 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8183 		}
8184 
8185 		/* Skip any modesets/resets */
8186 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8187 			continue;
8188 
8189 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8190 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8191 
8192 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8193 							     dm_old_con_state);
8194 
8195 		abm_changed = dm_new_crtc_state->abm_level !=
8196 			      dm_old_crtc_state->abm_level;
8197 
8198 		hdr_changed =
8199 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8200 
8201 		if (!scaling_changed && !abm_changed && !hdr_changed)
8202 			continue;
8203 
8204 		stream_update.stream = dm_new_crtc_state->stream;
8205 		if (scaling_changed) {
8206 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8207 					dm_new_con_state, dm_new_crtc_state->stream);
8208 
8209 			stream_update.src = dm_new_crtc_state->stream->src;
8210 			stream_update.dst = dm_new_crtc_state->stream->dst;
8211 		}
8212 
8213 		if (abm_changed) {
8214 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8215 
8216 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8217 		}
8218 
8219 		if (hdr_changed) {
8220 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8221 			stream_update.hdr_static_metadata = &hdr_packet;
8222 		}
8223 
8224 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8225 
8226 		if (WARN_ON(!status))
8227 			continue;
8228 
8229 		WARN_ON(!status->plane_count);
8230 
8231 		/*
8232 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8233 		 * Here we create an empty update on each plane.
8234 		 * To fix this, DC should permit updating only stream properties.
8235 		 */
8236 		for (j = 0; j < status->plane_count; j++)
8237 			dummy_updates[j].surface = status->plane_states[0];
8238 
8239 
8240 		mutex_lock(&dm->dc_lock);
8241 		dc_commit_updates_for_stream(dm->dc,
8242 						     dummy_updates,
8243 						     status->plane_count,
8244 						     dm_new_crtc_state->stream,
8245 						     &stream_update,
8246 						     dc_state);
8247 		mutex_unlock(&dm->dc_lock);
8248 	}
8249 
8250 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8251 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8252 				      new_crtc_state, i) {
8253 		if (old_crtc_state->active && !new_crtc_state->active)
8254 			crtc_disable_count++;
8255 
8256 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8257 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8258 
8259 		/* For freesync config update on crtc state and params for irq */
8260 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8261 
8262 		/* Handle vrr on->off / off->on transitions */
8263 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8264 						dm_new_crtc_state);
8265 	}
8266 
8267 	/**
8268 	 * Enable interrupts for CRTCs that are newly enabled or went through
8269 	 * a modeset. It was intentionally deferred until after the front end
8270 	 * state was modified to wait until the OTG was on and so the IRQ
8271 	 * handlers didn't access stale or invalid state.
8272 	 */
8273 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8274 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8275 #ifdef CONFIG_DEBUG_FS
8276 		bool configure_crc = false;
8277 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
8278 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8279 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
8280 #endif
8281 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8282 		cur_crc_src = acrtc->dm_irq_params.crc_src;
8283 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8284 #endif
8285 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8286 
8287 		if (new_crtc_state->active &&
8288 		    (!old_crtc_state->active ||
8289 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8290 			dc_stream_retain(dm_new_crtc_state->stream);
8291 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8292 			manage_dm_interrupts(adev, acrtc, true);
8293 
8294 #ifdef CONFIG_DEBUG_FS
8295 			/**
8296 			 * Frontend may have changed so reapply the CRC capture
8297 			 * settings for the stream.
8298 			 */
8299 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8300 
8301 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8302 				configure_crc = true;
8303 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8304 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
8305 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8306 					acrtc->dm_irq_params.crc_window.update_win = true;
8307 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
8308 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
8309 					crc_rd_wrk->crtc = crtc;
8310 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
8311 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8312 				}
8313 #endif
8314 			}
8315 
8316 			if (configure_crc)
8317 				if (amdgpu_dm_crtc_configure_crc_source(
8318 					crtc, dm_new_crtc_state, cur_crc_src))
8319 					DRM_DEBUG_DRIVER("Failed to configure crc source");
8320 #endif
8321 		}
8322 	}
8323 
8324 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8325 		if (new_crtc_state->async_flip)
8326 			wait_for_vblank = false;
8327 
8328 	/* update planes when needed per crtc*/
8329 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8330 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8331 
8332 		if (dm_new_crtc_state->stream)
8333 			amdgpu_dm_commit_planes(state, dc_state, dev,
8334 						dm, crtc, wait_for_vblank);
8335 	}
8336 
8337 	/* Update audio instances for each connector. */
8338 	amdgpu_dm_commit_audio(dev, state);
8339 
8340 	/* restore the backlight level */
8341 	for (i = 0; i < dm->num_of_edps; i++) {
8342 		if (dm->backlight_dev[i] &&
8343 		    (dm->actual_brightness[i] != dm->brightness[i]))
8344 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
8345 	}
8346 
8347 	/*
8348 	 * send vblank event on all events not handled in flip and
8349 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8350 	 */
8351 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8352 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8353 
8354 		if (new_crtc_state->event)
8355 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8356 
8357 		new_crtc_state->event = NULL;
8358 	}
8359 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8360 
8361 	/* Signal HW programming completion */
8362 	drm_atomic_helper_commit_hw_done(state);
8363 
8364 	if (wait_for_vblank)
8365 		drm_atomic_helper_wait_for_flip_done(dev, state);
8366 
8367 	drm_atomic_helper_cleanup_planes(dev, state);
8368 
8369 	/* return the stolen vga memory back to VRAM */
8370 	if (!adev->mman.keep_stolen_vga_memory)
8371 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8372 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8373 
8374 	/*
8375 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8376 	 * so we can put the GPU into runtime suspend if we're not driving any
8377 	 * displays anymore
8378 	 */
8379 	for (i = 0; i < crtc_disable_count; i++)
8380 		pm_runtime_put_autosuspend(dev->dev);
8381 	pm_runtime_mark_last_busy(dev->dev);
8382 
8383 	if (dc_state_temp)
8384 		dc_release_state(dc_state_temp);
8385 }
8386 
8387 
8388 static int dm_force_atomic_commit(struct drm_connector *connector)
8389 {
8390 	int ret = 0;
8391 	struct drm_device *ddev = connector->dev;
8392 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8393 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8394 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8395 	struct drm_connector_state *conn_state;
8396 	struct drm_crtc_state *crtc_state;
8397 	struct drm_plane_state *plane_state;
8398 
8399 	if (!state)
8400 		return -ENOMEM;
8401 
8402 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8403 
8404 	/* Construct an atomic state to restore previous display setting */
8405 
8406 	/*
8407 	 * Attach connectors to drm_atomic_state
8408 	 */
8409 	conn_state = drm_atomic_get_connector_state(state, connector);
8410 
8411 	ret = PTR_ERR_OR_ZERO(conn_state);
8412 	if (ret)
8413 		goto out;
8414 
8415 	/* Attach crtc to drm_atomic_state*/
8416 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8417 
8418 	ret = PTR_ERR_OR_ZERO(crtc_state);
8419 	if (ret)
8420 		goto out;
8421 
8422 	/* force a restore */
8423 	crtc_state->mode_changed = true;
8424 
8425 	/* Attach plane to drm_atomic_state */
8426 	plane_state = drm_atomic_get_plane_state(state, plane);
8427 
8428 	ret = PTR_ERR_OR_ZERO(plane_state);
8429 	if (ret)
8430 		goto out;
8431 
8432 	/* Call commit internally with the state we just constructed */
8433 	ret = drm_atomic_commit(state);
8434 
8435 out:
8436 	drm_atomic_state_put(state);
8437 	if (ret)
8438 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8439 
8440 	return ret;
8441 }
8442 
8443 /*
8444  * This function handles all cases when set mode does not come upon hotplug.
8445  * This includes when a display is unplugged then plugged back into the
8446  * same port and when running without usermode desktop manager supprot
8447  */
8448 void dm_restore_drm_connector_state(struct drm_device *dev,
8449 				    struct drm_connector *connector)
8450 {
8451 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8452 	struct amdgpu_crtc *disconnected_acrtc;
8453 	struct dm_crtc_state *acrtc_state;
8454 
8455 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8456 		return;
8457 
8458 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8459 	if (!disconnected_acrtc)
8460 		return;
8461 
8462 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8463 	if (!acrtc_state->stream)
8464 		return;
8465 
8466 	/*
8467 	 * If the previous sink is not released and different from the current,
8468 	 * we deduce we are in a state where we can not rely on usermode call
8469 	 * to turn on the display, so we do it here
8470 	 */
8471 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8472 		dm_force_atomic_commit(&aconnector->base);
8473 }
8474 
8475 /*
8476  * Grabs all modesetting locks to serialize against any blocking commits,
8477  * Waits for completion of all non blocking commits.
8478  */
8479 static int do_aquire_global_lock(struct drm_device *dev,
8480 				 struct drm_atomic_state *state)
8481 {
8482 	struct drm_crtc *crtc;
8483 	struct drm_crtc_commit *commit;
8484 	long ret;
8485 
8486 	/*
8487 	 * Adding all modeset locks to aquire_ctx will
8488 	 * ensure that when the framework release it the
8489 	 * extra locks we are locking here will get released to
8490 	 */
8491 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8492 	if (ret)
8493 		return ret;
8494 
8495 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8496 		spin_lock(&crtc->commit_lock);
8497 		commit = list_first_entry_or_null(&crtc->commit_list,
8498 				struct drm_crtc_commit, commit_entry);
8499 		if (commit)
8500 			drm_crtc_commit_get(commit);
8501 		spin_unlock(&crtc->commit_lock);
8502 
8503 		if (!commit)
8504 			continue;
8505 
8506 		/*
8507 		 * Make sure all pending HW programming completed and
8508 		 * page flips done
8509 		 */
8510 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8511 
8512 		if (ret > 0)
8513 			ret = wait_for_completion_interruptible_timeout(
8514 					&commit->flip_done, 10*HZ);
8515 
8516 		if (ret == 0)
8517 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8518 				  "timed out\n", crtc->base.id, crtc->name);
8519 
8520 		drm_crtc_commit_put(commit);
8521 	}
8522 
8523 	return ret < 0 ? ret : 0;
8524 }
8525 
8526 static void get_freesync_config_for_crtc(
8527 	struct dm_crtc_state *new_crtc_state,
8528 	struct dm_connector_state *new_con_state)
8529 {
8530 	struct mod_freesync_config config = {0};
8531 	struct amdgpu_dm_connector *aconnector =
8532 			to_amdgpu_dm_connector(new_con_state->base.connector);
8533 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8534 	int vrefresh = drm_mode_vrefresh(mode);
8535 	bool fs_vid_mode = false;
8536 
8537 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8538 					vrefresh >= aconnector->min_vfreq &&
8539 					vrefresh <= aconnector->max_vfreq;
8540 
8541 	if (new_crtc_state->vrr_supported) {
8542 		new_crtc_state->stream->ignore_msa_timing_param = true;
8543 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
8544 
8545 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
8546 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
8547 		config.vsif_supported = true;
8548 		config.btr = true;
8549 
8550 		if (fs_vid_mode) {
8551 			config.state = VRR_STATE_ACTIVE_FIXED;
8552 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
8553 			goto out;
8554 		} else if (new_crtc_state->base.vrr_enabled) {
8555 			config.state = VRR_STATE_ACTIVE_VARIABLE;
8556 		} else {
8557 			config.state = VRR_STATE_INACTIVE;
8558 		}
8559 	}
8560 out:
8561 	new_crtc_state->freesync_config = config;
8562 }
8563 
8564 static void reset_freesync_config_for_crtc(
8565 	struct dm_crtc_state *new_crtc_state)
8566 {
8567 	new_crtc_state->vrr_supported = false;
8568 
8569 	memset(&new_crtc_state->vrr_infopacket, 0,
8570 	       sizeof(new_crtc_state->vrr_infopacket));
8571 }
8572 
8573 static bool
8574 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
8575 				 struct drm_crtc_state *new_crtc_state)
8576 {
8577 	const struct drm_display_mode *old_mode, *new_mode;
8578 
8579 	if (!old_crtc_state || !new_crtc_state)
8580 		return false;
8581 
8582 	old_mode = &old_crtc_state->mode;
8583 	new_mode = &new_crtc_state->mode;
8584 
8585 	if (old_mode->clock       == new_mode->clock &&
8586 	    old_mode->hdisplay    == new_mode->hdisplay &&
8587 	    old_mode->vdisplay    == new_mode->vdisplay &&
8588 	    old_mode->htotal      == new_mode->htotal &&
8589 	    old_mode->vtotal      != new_mode->vtotal &&
8590 	    old_mode->hsync_start == new_mode->hsync_start &&
8591 	    old_mode->vsync_start != new_mode->vsync_start &&
8592 	    old_mode->hsync_end   == new_mode->hsync_end &&
8593 	    old_mode->vsync_end   != new_mode->vsync_end &&
8594 	    old_mode->hskew       == new_mode->hskew &&
8595 	    old_mode->vscan       == new_mode->vscan &&
8596 	    (old_mode->vsync_end - old_mode->vsync_start) ==
8597 	    (new_mode->vsync_end - new_mode->vsync_start))
8598 		return true;
8599 
8600 	return false;
8601 }
8602 
8603 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
8604 	uint64_t num, den, res;
8605 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
8606 
8607 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
8608 
8609 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
8610 	den = (unsigned long long)new_crtc_state->mode.htotal *
8611 	      (unsigned long long)new_crtc_state->mode.vtotal;
8612 
8613 	res = div_u64(num, den);
8614 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
8615 }
8616 
8617 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8618 			 struct drm_atomic_state *state,
8619 			 struct drm_crtc *crtc,
8620 			 struct drm_crtc_state *old_crtc_state,
8621 			 struct drm_crtc_state *new_crtc_state,
8622 			 bool enable,
8623 			 bool *lock_and_validation_needed)
8624 {
8625 	struct dm_atomic_state *dm_state = NULL;
8626 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8627 	struct dc_stream_state *new_stream;
8628 	int ret = 0;
8629 
8630 	/*
8631 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8632 	 * update changed items
8633 	 */
8634 	struct amdgpu_crtc *acrtc = NULL;
8635 	struct amdgpu_dm_connector *aconnector = NULL;
8636 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8637 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8638 
8639 	new_stream = NULL;
8640 
8641 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8642 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8643 	acrtc = to_amdgpu_crtc(crtc);
8644 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8645 
8646 	/* TODO This hack should go away */
8647 	if (aconnector && enable) {
8648 		/* Make sure fake sink is created in plug-in scenario */
8649 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8650 							    &aconnector->base);
8651 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8652 							    &aconnector->base);
8653 
8654 		if (IS_ERR(drm_new_conn_state)) {
8655 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8656 			goto fail;
8657 		}
8658 
8659 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8660 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8661 
8662 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8663 			goto skip_modeset;
8664 
8665 		new_stream = create_validate_stream_for_sink(aconnector,
8666 							     &new_crtc_state->mode,
8667 							     dm_new_conn_state,
8668 							     dm_old_crtc_state->stream);
8669 
8670 		/*
8671 		 * we can have no stream on ACTION_SET if a display
8672 		 * was disconnected during S3, in this case it is not an
8673 		 * error, the OS will be updated after detection, and
8674 		 * will do the right thing on next atomic commit
8675 		 */
8676 
8677 		if (!new_stream) {
8678 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8679 					__func__, acrtc->base.base.id);
8680 			ret = -ENOMEM;
8681 			goto fail;
8682 		}
8683 
8684 		/*
8685 		 * TODO: Check VSDB bits to decide whether this should
8686 		 * be enabled or not.
8687 		 */
8688 		new_stream->triggered_crtc_reset.enabled =
8689 			dm->force_timing_sync;
8690 
8691 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8692 
8693 		ret = fill_hdr_info_packet(drm_new_conn_state,
8694 					   &new_stream->hdr_static_metadata);
8695 		if (ret)
8696 			goto fail;
8697 
8698 		/*
8699 		 * If we already removed the old stream from the context
8700 		 * (and set the new stream to NULL) then we can't reuse
8701 		 * the old stream even if the stream and scaling are unchanged.
8702 		 * We'll hit the BUG_ON and black screen.
8703 		 *
8704 		 * TODO: Refactor this function to allow this check to work
8705 		 * in all conditions.
8706 		 */
8707 		if (dm_new_crtc_state->stream &&
8708 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
8709 			goto skip_modeset;
8710 
8711 		if (dm_new_crtc_state->stream &&
8712 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8713 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8714 			new_crtc_state->mode_changed = false;
8715 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8716 					 new_crtc_state->mode_changed);
8717 		}
8718 	}
8719 
8720 	/* mode_changed flag may get updated above, need to check again */
8721 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8722 		goto skip_modeset;
8723 
8724 	drm_dbg_state(state->dev,
8725 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8726 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8727 		"connectors_changed:%d\n",
8728 		acrtc->crtc_id,
8729 		new_crtc_state->enable,
8730 		new_crtc_state->active,
8731 		new_crtc_state->planes_changed,
8732 		new_crtc_state->mode_changed,
8733 		new_crtc_state->active_changed,
8734 		new_crtc_state->connectors_changed);
8735 
8736 	/* Remove stream for any changed/disabled CRTC */
8737 	if (!enable) {
8738 
8739 		if (!dm_old_crtc_state->stream)
8740 			goto skip_modeset;
8741 
8742 		if (dm_new_crtc_state->stream &&
8743 		    is_timing_unchanged_for_freesync(new_crtc_state,
8744 						     old_crtc_state)) {
8745 			new_crtc_state->mode_changed = false;
8746 			DRM_DEBUG_DRIVER(
8747 				"Mode change not required for front porch change, "
8748 				"setting mode_changed to %d",
8749 				new_crtc_state->mode_changed);
8750 
8751 			set_freesync_fixed_config(dm_new_crtc_state);
8752 
8753 			goto skip_modeset;
8754 		} else if (aconnector &&
8755 			   is_freesync_video_mode(&new_crtc_state->mode,
8756 						  aconnector)) {
8757 			struct drm_display_mode *high_mode;
8758 
8759 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
8760 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
8761 				set_freesync_fixed_config(dm_new_crtc_state);
8762 			}
8763 		}
8764 
8765 		ret = dm_atomic_get_state(state, &dm_state);
8766 		if (ret)
8767 			goto fail;
8768 
8769 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8770 				crtc->base.id);
8771 
8772 		/* i.e. reset mode */
8773 		if (dc_remove_stream_from_ctx(
8774 				dm->dc,
8775 				dm_state->context,
8776 				dm_old_crtc_state->stream) != DC_OK) {
8777 			ret = -EINVAL;
8778 			goto fail;
8779 		}
8780 
8781 		dc_stream_release(dm_old_crtc_state->stream);
8782 		dm_new_crtc_state->stream = NULL;
8783 
8784 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8785 
8786 		*lock_and_validation_needed = true;
8787 
8788 	} else {/* Add stream for any updated/enabled CRTC */
8789 		/*
8790 		 * Quick fix to prevent NULL pointer on new_stream when
8791 		 * added MST connectors not found in existing crtc_state in the chained mode
8792 		 * TODO: need to dig out the root cause of that
8793 		 */
8794 		if (!aconnector)
8795 			goto skip_modeset;
8796 
8797 		if (modereset_required(new_crtc_state))
8798 			goto skip_modeset;
8799 
8800 		if (modeset_required(new_crtc_state, new_stream,
8801 				     dm_old_crtc_state->stream)) {
8802 
8803 			WARN_ON(dm_new_crtc_state->stream);
8804 
8805 			ret = dm_atomic_get_state(state, &dm_state);
8806 			if (ret)
8807 				goto fail;
8808 
8809 			dm_new_crtc_state->stream = new_stream;
8810 
8811 			dc_stream_retain(new_stream);
8812 
8813 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
8814 					 crtc->base.id);
8815 
8816 			if (dc_add_stream_to_ctx(
8817 					dm->dc,
8818 					dm_state->context,
8819 					dm_new_crtc_state->stream) != DC_OK) {
8820 				ret = -EINVAL;
8821 				goto fail;
8822 			}
8823 
8824 			*lock_and_validation_needed = true;
8825 		}
8826 	}
8827 
8828 skip_modeset:
8829 	/* Release extra reference */
8830 	if (new_stream)
8831 		 dc_stream_release(new_stream);
8832 
8833 	/*
8834 	 * We want to do dc stream updates that do not require a
8835 	 * full modeset below.
8836 	 */
8837 	if (!(enable && aconnector && new_crtc_state->active))
8838 		return 0;
8839 	/*
8840 	 * Given above conditions, the dc state cannot be NULL because:
8841 	 * 1. We're in the process of enabling CRTCs (just been added
8842 	 *    to the dc context, or already is on the context)
8843 	 * 2. Has a valid connector attached, and
8844 	 * 3. Is currently active and enabled.
8845 	 * => The dc stream state currently exists.
8846 	 */
8847 	BUG_ON(dm_new_crtc_state->stream == NULL);
8848 
8849 	/* Scaling or underscan settings */
8850 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8851 				drm_atomic_crtc_needs_modeset(new_crtc_state))
8852 		update_stream_scaling_settings(
8853 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8854 
8855 	/* ABM settings */
8856 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8857 
8858 	/*
8859 	 * Color management settings. We also update color properties
8860 	 * when a modeset is needed, to ensure it gets reprogrammed.
8861 	 */
8862 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8863 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8864 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8865 		if (ret)
8866 			goto fail;
8867 	}
8868 
8869 	/* Update Freesync settings. */
8870 	get_freesync_config_for_crtc(dm_new_crtc_state,
8871 				     dm_new_conn_state);
8872 
8873 	return ret;
8874 
8875 fail:
8876 	if (new_stream)
8877 		dc_stream_release(new_stream);
8878 	return ret;
8879 }
8880 
8881 static bool should_reset_plane(struct drm_atomic_state *state,
8882 			       struct drm_plane *plane,
8883 			       struct drm_plane_state *old_plane_state,
8884 			       struct drm_plane_state *new_plane_state)
8885 {
8886 	struct drm_plane *other;
8887 	struct drm_plane_state *old_other_state, *new_other_state;
8888 	struct drm_crtc_state *new_crtc_state;
8889 	int i;
8890 
8891 	/*
8892 	 * TODO: Remove this hack once the checks below are sufficient
8893 	 * enough to determine when we need to reset all the planes on
8894 	 * the stream.
8895 	 */
8896 	if (state->allow_modeset)
8897 		return true;
8898 
8899 	/* Exit early if we know that we're adding or removing the plane. */
8900 	if (old_plane_state->crtc != new_plane_state->crtc)
8901 		return true;
8902 
8903 	/* old crtc == new_crtc == NULL, plane not in context. */
8904 	if (!new_plane_state->crtc)
8905 		return false;
8906 
8907 	new_crtc_state =
8908 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8909 
8910 	if (!new_crtc_state)
8911 		return true;
8912 
8913 	/* CRTC Degamma changes currently require us to recreate planes. */
8914 	if (new_crtc_state->color_mgmt_changed)
8915 		return true;
8916 
8917 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8918 		return true;
8919 
8920 	/*
8921 	 * If there are any new primary or overlay planes being added or
8922 	 * removed then the z-order can potentially change. To ensure
8923 	 * correct z-order and pipe acquisition the current DC architecture
8924 	 * requires us to remove and recreate all existing planes.
8925 	 *
8926 	 * TODO: Come up with a more elegant solution for this.
8927 	 */
8928 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8929 		struct amdgpu_framebuffer *old_afb, *new_afb;
8930 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8931 			continue;
8932 
8933 		if (old_other_state->crtc != new_plane_state->crtc &&
8934 		    new_other_state->crtc != new_plane_state->crtc)
8935 			continue;
8936 
8937 		if (old_other_state->crtc != new_other_state->crtc)
8938 			return true;
8939 
8940 		/* Src/dst size and scaling updates. */
8941 		if (old_other_state->src_w != new_other_state->src_w ||
8942 		    old_other_state->src_h != new_other_state->src_h ||
8943 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8944 		    old_other_state->crtc_h != new_other_state->crtc_h)
8945 			return true;
8946 
8947 		/* Rotation / mirroring updates. */
8948 		if (old_other_state->rotation != new_other_state->rotation)
8949 			return true;
8950 
8951 		/* Blending updates. */
8952 		if (old_other_state->pixel_blend_mode !=
8953 		    new_other_state->pixel_blend_mode)
8954 			return true;
8955 
8956 		/* Alpha updates. */
8957 		if (old_other_state->alpha != new_other_state->alpha)
8958 			return true;
8959 
8960 		/* Colorspace changes. */
8961 		if (old_other_state->color_range != new_other_state->color_range ||
8962 		    old_other_state->color_encoding != new_other_state->color_encoding)
8963 			return true;
8964 
8965 		/* Framebuffer checks fall at the end. */
8966 		if (!old_other_state->fb || !new_other_state->fb)
8967 			continue;
8968 
8969 		/* Pixel format changes can require bandwidth updates. */
8970 		if (old_other_state->fb->format != new_other_state->fb->format)
8971 			return true;
8972 
8973 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8974 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8975 
8976 		/* Tiling and DCC changes also require bandwidth updates. */
8977 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
8978 		    old_afb->base.modifier != new_afb->base.modifier)
8979 			return true;
8980 	}
8981 
8982 	return false;
8983 }
8984 
8985 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8986 			      struct drm_plane_state *new_plane_state,
8987 			      struct drm_framebuffer *fb)
8988 {
8989 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8990 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8991 	unsigned int pitch;
8992 	bool linear;
8993 
8994 	if (fb->width > new_acrtc->max_cursor_width ||
8995 	    fb->height > new_acrtc->max_cursor_height) {
8996 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8997 				 new_plane_state->fb->width,
8998 				 new_plane_state->fb->height);
8999 		return -EINVAL;
9000 	}
9001 	if (new_plane_state->src_w != fb->width << 16 ||
9002 	    new_plane_state->src_h != fb->height << 16) {
9003 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9004 		return -EINVAL;
9005 	}
9006 
9007 	/* Pitch in pixels */
9008 	pitch = fb->pitches[0] / fb->format->cpp[0];
9009 
9010 	if (fb->width != pitch) {
9011 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9012 				 fb->width, pitch);
9013 		return -EINVAL;
9014 	}
9015 
9016 	switch (pitch) {
9017 	case 64:
9018 	case 128:
9019 	case 256:
9020 		/* FB pitch is supported by cursor plane */
9021 		break;
9022 	default:
9023 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9024 		return -EINVAL;
9025 	}
9026 
9027 	/* Core DRM takes care of checking FB modifiers, so we only need to
9028 	 * check tiling flags when the FB doesn't have a modifier. */
9029 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9030 		if (adev->family < AMDGPU_FAMILY_AI) {
9031 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9032 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9033 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9034 		} else {
9035 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9036 		}
9037 		if (!linear) {
9038 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9039 			return -EINVAL;
9040 		}
9041 	}
9042 
9043 	return 0;
9044 }
9045 
9046 static int dm_update_plane_state(struct dc *dc,
9047 				 struct drm_atomic_state *state,
9048 				 struct drm_plane *plane,
9049 				 struct drm_plane_state *old_plane_state,
9050 				 struct drm_plane_state *new_plane_state,
9051 				 bool enable,
9052 				 bool *lock_and_validation_needed)
9053 {
9054 
9055 	struct dm_atomic_state *dm_state = NULL;
9056 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9057 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9058 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9059 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9060 	struct amdgpu_crtc *new_acrtc;
9061 	bool needs_reset;
9062 	int ret = 0;
9063 
9064 
9065 	new_plane_crtc = new_plane_state->crtc;
9066 	old_plane_crtc = old_plane_state->crtc;
9067 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9068 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9069 
9070 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9071 		if (!enable || !new_plane_crtc ||
9072 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9073 			return 0;
9074 
9075 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9076 
9077 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9078 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9079 			return -EINVAL;
9080 		}
9081 
9082 		if (new_plane_state->fb) {
9083 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9084 						 new_plane_state->fb);
9085 			if (ret)
9086 				return ret;
9087 		}
9088 
9089 		return 0;
9090 	}
9091 
9092 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9093 					 new_plane_state);
9094 
9095 	/* Remove any changed/removed planes */
9096 	if (!enable) {
9097 		if (!needs_reset)
9098 			return 0;
9099 
9100 		if (!old_plane_crtc)
9101 			return 0;
9102 
9103 		old_crtc_state = drm_atomic_get_old_crtc_state(
9104 				state, old_plane_crtc);
9105 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9106 
9107 		if (!dm_old_crtc_state->stream)
9108 			return 0;
9109 
9110 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9111 				plane->base.id, old_plane_crtc->base.id);
9112 
9113 		ret = dm_atomic_get_state(state, &dm_state);
9114 		if (ret)
9115 			return ret;
9116 
9117 		if (!dc_remove_plane_from_context(
9118 				dc,
9119 				dm_old_crtc_state->stream,
9120 				dm_old_plane_state->dc_state,
9121 				dm_state->context)) {
9122 
9123 			return -EINVAL;
9124 		}
9125 
9126 
9127 		dc_plane_state_release(dm_old_plane_state->dc_state);
9128 		dm_new_plane_state->dc_state = NULL;
9129 
9130 		*lock_and_validation_needed = true;
9131 
9132 	} else { /* Add new planes */
9133 		struct dc_plane_state *dc_new_plane_state;
9134 
9135 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9136 			return 0;
9137 
9138 		if (!new_plane_crtc)
9139 			return 0;
9140 
9141 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9142 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9143 
9144 		if (!dm_new_crtc_state->stream)
9145 			return 0;
9146 
9147 		if (!needs_reset)
9148 			return 0;
9149 
9150 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9151 		if (ret)
9152 			return ret;
9153 
9154 		WARN_ON(dm_new_plane_state->dc_state);
9155 
9156 		dc_new_plane_state = dc_create_plane_state(dc);
9157 		if (!dc_new_plane_state)
9158 			return -ENOMEM;
9159 
9160 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9161 				 plane->base.id, new_plane_crtc->base.id);
9162 
9163 		ret = fill_dc_plane_attributes(
9164 			drm_to_adev(new_plane_crtc->dev),
9165 			dc_new_plane_state,
9166 			new_plane_state,
9167 			new_crtc_state);
9168 		if (ret) {
9169 			dc_plane_state_release(dc_new_plane_state);
9170 			return ret;
9171 		}
9172 
9173 		ret = dm_atomic_get_state(state, &dm_state);
9174 		if (ret) {
9175 			dc_plane_state_release(dc_new_plane_state);
9176 			return ret;
9177 		}
9178 
9179 		/*
9180 		 * Any atomic check errors that occur after this will
9181 		 * not need a release. The plane state will be attached
9182 		 * to the stream, and therefore part of the atomic
9183 		 * state. It'll be released when the atomic state is
9184 		 * cleaned.
9185 		 */
9186 		if (!dc_add_plane_to_context(
9187 				dc,
9188 				dm_new_crtc_state->stream,
9189 				dc_new_plane_state,
9190 				dm_state->context)) {
9191 
9192 			dc_plane_state_release(dc_new_plane_state);
9193 			return -EINVAL;
9194 		}
9195 
9196 		dm_new_plane_state->dc_state = dc_new_plane_state;
9197 
9198 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
9199 
9200 		/* Tell DC to do a full surface update every time there
9201 		 * is a plane change. Inefficient, but works for now.
9202 		 */
9203 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9204 
9205 		*lock_and_validation_needed = true;
9206 	}
9207 
9208 
9209 	return ret;
9210 }
9211 
9212 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
9213 				       int *src_w, int *src_h)
9214 {
9215 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
9216 	case DRM_MODE_ROTATE_90:
9217 	case DRM_MODE_ROTATE_270:
9218 		*src_w = plane_state->src_h >> 16;
9219 		*src_h = plane_state->src_w >> 16;
9220 		break;
9221 	case DRM_MODE_ROTATE_0:
9222 	case DRM_MODE_ROTATE_180:
9223 	default:
9224 		*src_w = plane_state->src_w >> 16;
9225 		*src_h = plane_state->src_h >> 16;
9226 		break;
9227 	}
9228 }
9229 
9230 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9231 				struct drm_crtc *crtc,
9232 				struct drm_crtc_state *new_crtc_state)
9233 {
9234 	struct drm_plane *cursor = crtc->cursor, *underlying;
9235 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
9236 	int i;
9237 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
9238 	int cursor_src_w, cursor_src_h;
9239 	int underlying_src_w, underlying_src_h;
9240 
9241 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9242 	 * cursor per pipe but it's going to inherit the scaling and
9243 	 * positioning from the underlying pipe. Check the cursor plane's
9244 	 * blending properties match the underlying planes'. */
9245 
9246 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
9247 	if (!new_cursor_state || !new_cursor_state->fb) {
9248 		return 0;
9249 	}
9250 
9251 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
9252 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
9253 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
9254 
9255 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
9256 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
9257 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
9258 			continue;
9259 
9260 		/* Ignore disabled planes */
9261 		if (!new_underlying_state->fb)
9262 			continue;
9263 
9264 		dm_get_oriented_plane_size(new_underlying_state,
9265 					   &underlying_src_w, &underlying_src_h);
9266 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
9267 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
9268 
9269 		if (cursor_scale_w != underlying_scale_w ||
9270 		    cursor_scale_h != underlying_scale_h) {
9271 			drm_dbg_atomic(crtc->dev,
9272 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
9273 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
9274 			return -EINVAL;
9275 		}
9276 
9277 		/* If this plane covers the whole CRTC, no need to check planes underneath */
9278 		if (new_underlying_state->crtc_x <= 0 &&
9279 		    new_underlying_state->crtc_y <= 0 &&
9280 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
9281 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
9282 			break;
9283 	}
9284 
9285 	return 0;
9286 }
9287 
9288 #if defined(CONFIG_DRM_AMD_DC_DCN)
9289 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9290 {
9291 	struct drm_connector *connector;
9292 	struct drm_connector_state *conn_state, *old_conn_state;
9293 	struct amdgpu_dm_connector *aconnector = NULL;
9294 	int i;
9295 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
9296 		if (!conn_state->crtc)
9297 			conn_state = old_conn_state;
9298 
9299 		if (conn_state->crtc != crtc)
9300 			continue;
9301 
9302 		aconnector = to_amdgpu_dm_connector(connector);
9303 		if (!aconnector->port || !aconnector->mst_port)
9304 			aconnector = NULL;
9305 		else
9306 			break;
9307 	}
9308 
9309 	if (!aconnector)
9310 		return 0;
9311 
9312 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9313 }
9314 #endif
9315 
9316 /**
9317  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9318  * @dev: The DRM device
9319  * @state: The atomic state to commit
9320  *
9321  * Validate that the given atomic state is programmable by DC into hardware.
9322  * This involves constructing a &struct dc_state reflecting the new hardware
9323  * state we wish to commit, then querying DC to see if it is programmable. It's
9324  * important not to modify the existing DC state. Otherwise, atomic_check
9325  * may unexpectedly commit hardware changes.
9326  *
9327  * When validating the DC state, it's important that the right locks are
9328  * acquired. For full updates case which removes/adds/updates streams on one
9329  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9330  * that any such full update commit will wait for completion of any outstanding
9331  * flip using DRMs synchronization events.
9332  *
9333  * Note that DM adds the affected connectors for all CRTCs in state, when that
9334  * might not seem necessary. This is because DC stream creation requires the
9335  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9336  * be possible but non-trivial - a possible TODO item.
9337  *
9338  * Return: -Error code if validation failed.
9339  */
9340 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9341 				  struct drm_atomic_state *state)
9342 {
9343 	struct amdgpu_device *adev = drm_to_adev(dev);
9344 	struct dm_atomic_state *dm_state = NULL;
9345 	struct dc *dc = adev->dm.dc;
9346 	struct drm_connector *connector;
9347 	struct drm_connector_state *old_con_state, *new_con_state;
9348 	struct drm_crtc *crtc;
9349 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9350 	struct drm_plane *plane;
9351 	struct drm_plane_state *old_plane_state, *new_plane_state;
9352 	enum dc_status status;
9353 	int ret, i;
9354 	bool lock_and_validation_needed = false;
9355 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9356 #if defined(CONFIG_DRM_AMD_DC_DCN)
9357 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
9358 	struct drm_dp_mst_topology_state *mst_state;
9359 	struct drm_dp_mst_topology_mgr *mgr;
9360 #endif
9361 
9362 	trace_amdgpu_dm_atomic_check_begin(state);
9363 
9364 	ret = drm_atomic_helper_check_modeset(dev, state);
9365 	if (ret) {
9366 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
9367 		goto fail;
9368 	}
9369 
9370 	/* Check connector changes */
9371 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9372 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9373 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9374 
9375 		/* Skip connectors that are disabled or part of modeset already. */
9376 		if (!old_con_state->crtc && !new_con_state->crtc)
9377 			continue;
9378 
9379 		if (!new_con_state->crtc)
9380 			continue;
9381 
9382 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9383 		if (IS_ERR(new_crtc_state)) {
9384 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
9385 			ret = PTR_ERR(new_crtc_state);
9386 			goto fail;
9387 		}
9388 
9389 		if (dm_old_con_state->abm_level !=
9390 		    dm_new_con_state->abm_level)
9391 			new_crtc_state->connectors_changed = true;
9392 	}
9393 
9394 #if defined(CONFIG_DRM_AMD_DC_DCN)
9395 	if (dc_resource_is_dsc_encoding_supported(dc)) {
9396 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9397 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9398 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9399 				if (ret) {
9400 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
9401 					goto fail;
9402 				}
9403 			}
9404 		}
9405 		if (!pre_validate_dsc(state, &dm_state, vars)) {
9406 			ret = -EINVAL;
9407 			goto fail;
9408 		}
9409 	}
9410 #endif
9411 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9412 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9413 
9414 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9415 		    !new_crtc_state->color_mgmt_changed &&
9416 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9417 			dm_old_crtc_state->dsc_force_changed == false)
9418 			continue;
9419 
9420 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
9421 		if (ret) {
9422 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
9423 			goto fail;
9424 		}
9425 
9426 		if (!new_crtc_state->enable)
9427 			continue;
9428 
9429 		ret = drm_atomic_add_affected_connectors(state, crtc);
9430 		if (ret) {
9431 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
9432 			goto fail;
9433 		}
9434 
9435 		ret = drm_atomic_add_affected_planes(state, crtc);
9436 		if (ret) {
9437 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
9438 			goto fail;
9439 		}
9440 
9441 		if (dm_old_crtc_state->dsc_force_changed)
9442 			new_crtc_state->mode_changed = true;
9443 	}
9444 
9445 	/*
9446 	 * Add all primary and overlay planes on the CRTC to the state
9447 	 * whenever a plane is enabled to maintain correct z-ordering
9448 	 * and to enable fast surface updates.
9449 	 */
9450 	drm_for_each_crtc(crtc, dev) {
9451 		bool modified = false;
9452 
9453 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9454 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9455 				continue;
9456 
9457 			if (new_plane_state->crtc == crtc ||
9458 			    old_plane_state->crtc == crtc) {
9459 				modified = true;
9460 				break;
9461 			}
9462 		}
9463 
9464 		if (!modified)
9465 			continue;
9466 
9467 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9468 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9469 				continue;
9470 
9471 			new_plane_state =
9472 				drm_atomic_get_plane_state(state, plane);
9473 
9474 			if (IS_ERR(new_plane_state)) {
9475 				ret = PTR_ERR(new_plane_state);
9476 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
9477 				goto fail;
9478 			}
9479 		}
9480 	}
9481 
9482 	/* Remove exiting planes if they are modified */
9483 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9484 		ret = dm_update_plane_state(dc, state, plane,
9485 					    old_plane_state,
9486 					    new_plane_state,
9487 					    false,
9488 					    &lock_and_validation_needed);
9489 		if (ret) {
9490 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9491 			goto fail;
9492 		}
9493 	}
9494 
9495 	/* Disable all crtcs which require disable */
9496 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9497 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9498 					   old_crtc_state,
9499 					   new_crtc_state,
9500 					   false,
9501 					   &lock_and_validation_needed);
9502 		if (ret) {
9503 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
9504 			goto fail;
9505 		}
9506 	}
9507 
9508 	/* Enable all crtcs which require enable */
9509 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9510 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9511 					   old_crtc_state,
9512 					   new_crtc_state,
9513 					   true,
9514 					   &lock_and_validation_needed);
9515 		if (ret) {
9516 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
9517 			goto fail;
9518 		}
9519 	}
9520 
9521 	/* Add new/modified planes */
9522 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9523 		ret = dm_update_plane_state(dc, state, plane,
9524 					    old_plane_state,
9525 					    new_plane_state,
9526 					    true,
9527 					    &lock_and_validation_needed);
9528 		if (ret) {
9529 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9530 			goto fail;
9531 		}
9532 	}
9533 
9534 	/* Run this here since we want to validate the streams we created */
9535 	ret = drm_atomic_helper_check_planes(dev, state);
9536 	if (ret) {
9537 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
9538 		goto fail;
9539 	}
9540 
9541 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9542 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9543 		if (dm_new_crtc_state->mpo_requested)
9544 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
9545 	}
9546 
9547 	/* Check cursor planes scaling */
9548 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9549 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9550 		if (ret) {
9551 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
9552 			goto fail;
9553 		}
9554 	}
9555 
9556 	if (state->legacy_cursor_update) {
9557 		/*
9558 		 * This is a fast cursor update coming from the plane update
9559 		 * helper, check if it can be done asynchronously for better
9560 		 * performance.
9561 		 */
9562 		state->async_update =
9563 			!drm_atomic_helper_async_check(dev, state);
9564 
9565 		/*
9566 		 * Skip the remaining global validation if this is an async
9567 		 * update. Cursor updates can be done without affecting
9568 		 * state or bandwidth calcs and this avoids the performance
9569 		 * penalty of locking the private state object and
9570 		 * allocating a new dc_state.
9571 		 */
9572 		if (state->async_update)
9573 			return 0;
9574 	}
9575 
9576 	/* Check scaling and underscan changes*/
9577 	/* TODO Removed scaling changes validation due to inability to commit
9578 	 * new stream into context w\o causing full reset. Need to
9579 	 * decide how to handle.
9580 	 */
9581 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9582 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9583 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9584 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9585 
9586 		/* Skip any modesets/resets */
9587 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9588 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9589 			continue;
9590 
9591 		/* Skip any thing not scale or underscan changes */
9592 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9593 			continue;
9594 
9595 		lock_and_validation_needed = true;
9596 	}
9597 
9598 #if defined(CONFIG_DRM_AMD_DC_DCN)
9599 	/* set the slot info for each mst_state based on the link encoding format */
9600 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
9601 		struct amdgpu_dm_connector *aconnector;
9602 		struct drm_connector *connector;
9603 		struct drm_connector_list_iter iter;
9604 		u8 link_coding_cap;
9605 
9606 		if (!mgr->mst_state )
9607 			continue;
9608 
9609 		drm_connector_list_iter_begin(dev, &iter);
9610 		drm_for_each_connector_iter(connector, &iter) {
9611 			int id = connector->index;
9612 
9613 			if (id == mst_state->mgr->conn_base_id) {
9614 				aconnector = to_amdgpu_dm_connector(connector);
9615 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
9616 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
9617 
9618 				break;
9619 			}
9620 		}
9621 		drm_connector_list_iter_end(&iter);
9622 
9623 	}
9624 #endif
9625 	/**
9626 	 * Streams and planes are reset when there are changes that affect
9627 	 * bandwidth. Anything that affects bandwidth needs to go through
9628 	 * DC global validation to ensure that the configuration can be applied
9629 	 * to hardware.
9630 	 *
9631 	 * We have to currently stall out here in atomic_check for outstanding
9632 	 * commits to finish in this case because our IRQ handlers reference
9633 	 * DRM state directly - we can end up disabling interrupts too early
9634 	 * if we don't.
9635 	 *
9636 	 * TODO: Remove this stall and drop DM state private objects.
9637 	 */
9638 	if (lock_and_validation_needed) {
9639 		ret = dm_atomic_get_state(state, &dm_state);
9640 		if (ret) {
9641 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
9642 			goto fail;
9643 		}
9644 
9645 		ret = do_aquire_global_lock(dev, state);
9646 		if (ret) {
9647 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
9648 			goto fail;
9649 		}
9650 
9651 #if defined(CONFIG_DRM_AMD_DC_DCN)
9652 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
9653 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
9654 			ret = -EINVAL;
9655 			goto fail;
9656 		}
9657 
9658 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
9659 		if (ret) {
9660 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
9661 			goto fail;
9662 		}
9663 #endif
9664 
9665 		/*
9666 		 * Perform validation of MST topology in the state:
9667 		 * We need to perform MST atomic check before calling
9668 		 * dc_validate_global_state(), or there is a chance
9669 		 * to get stuck in an infinite loop and hang eventually.
9670 		 */
9671 		ret = drm_dp_mst_atomic_check(state);
9672 		if (ret) {
9673 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
9674 			goto fail;
9675 		}
9676 		status = dc_validate_global_state(dc, dm_state->context, true);
9677 		if (status != DC_OK) {
9678 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
9679 				       dc_status_to_str(status), status);
9680 			ret = -EINVAL;
9681 			goto fail;
9682 		}
9683 	} else {
9684 		/*
9685 		 * The commit is a fast update. Fast updates shouldn't change
9686 		 * the DC context, affect global validation, and can have their
9687 		 * commit work done in parallel with other commits not touching
9688 		 * the same resource. If we have a new DC context as part of
9689 		 * the DM atomic state from validation we need to free it and
9690 		 * retain the existing one instead.
9691 		 *
9692 		 * Furthermore, since the DM atomic state only contains the DC
9693 		 * context and can safely be annulled, we can free the state
9694 		 * and clear the associated private object now to free
9695 		 * some memory and avoid a possible use-after-free later.
9696 		 */
9697 
9698 		for (i = 0; i < state->num_private_objs; i++) {
9699 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9700 
9701 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
9702 				int j = state->num_private_objs-1;
9703 
9704 				dm_atomic_destroy_state(obj,
9705 						state->private_objs[i].state);
9706 
9707 				/* If i is not at the end of the array then the
9708 				 * last element needs to be moved to where i was
9709 				 * before the array can safely be truncated.
9710 				 */
9711 				if (i != j)
9712 					state->private_objs[i] =
9713 						state->private_objs[j];
9714 
9715 				state->private_objs[j].ptr = NULL;
9716 				state->private_objs[j].state = NULL;
9717 				state->private_objs[j].old_state = NULL;
9718 				state->private_objs[j].new_state = NULL;
9719 
9720 				state->num_private_objs = j;
9721 				break;
9722 			}
9723 		}
9724 	}
9725 
9726 	/* Store the overall update type for use later in atomic check. */
9727 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9728 		struct dm_crtc_state *dm_new_crtc_state =
9729 			to_dm_crtc_state(new_crtc_state);
9730 
9731 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9732 							 UPDATE_TYPE_FULL :
9733 							 UPDATE_TYPE_FAST;
9734 	}
9735 
9736 	/* Must be success */
9737 	WARN_ON(ret);
9738 
9739 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9740 
9741 	return ret;
9742 
9743 fail:
9744 	if (ret == -EDEADLK)
9745 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9746 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9747 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9748 	else
9749 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9750 
9751 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9752 
9753 	return ret;
9754 }
9755 
9756 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9757 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9758 {
9759 	uint8_t dpcd_data;
9760 	bool capable = false;
9761 
9762 	if (amdgpu_dm_connector->dc_link &&
9763 		dm_helpers_dp_read_dpcd(
9764 				NULL,
9765 				amdgpu_dm_connector->dc_link,
9766 				DP_DOWN_STREAM_PORT_COUNT,
9767 				&dpcd_data,
9768 				sizeof(dpcd_data))) {
9769 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9770 	}
9771 
9772 	return capable;
9773 }
9774 
9775 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
9776 		unsigned int offset,
9777 		unsigned int total_length,
9778 		uint8_t *data,
9779 		unsigned int length,
9780 		struct amdgpu_hdmi_vsdb_info *vsdb)
9781 {
9782 	bool res;
9783 	union dmub_rb_cmd cmd;
9784 	struct dmub_cmd_send_edid_cea *input;
9785 	struct dmub_cmd_edid_cea_output *output;
9786 
9787 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
9788 		return false;
9789 
9790 	memset(&cmd, 0, sizeof(cmd));
9791 
9792 	input = &cmd.edid_cea.data.input;
9793 
9794 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
9795 	cmd.edid_cea.header.sub_type = 0;
9796 	cmd.edid_cea.header.payload_bytes =
9797 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
9798 	input->offset = offset;
9799 	input->length = length;
9800 	input->cea_total_length = total_length;
9801 	memcpy(input->payload, data, length);
9802 
9803 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
9804 	if (!res) {
9805 		DRM_ERROR("EDID CEA parser failed\n");
9806 		return false;
9807 	}
9808 
9809 	output = &cmd.edid_cea.data.output;
9810 
9811 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
9812 		if (!output->ack.success) {
9813 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
9814 					output->ack.offset);
9815 		}
9816 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
9817 		if (!output->amd_vsdb.vsdb_found)
9818 			return false;
9819 
9820 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
9821 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
9822 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
9823 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
9824 	} else {
9825 		DRM_WARN("Unknown EDID CEA parser results\n");
9826 		return false;
9827 	}
9828 
9829 	return true;
9830 }
9831 
9832 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
9833 		uint8_t *edid_ext, int len,
9834 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
9835 {
9836 	int i;
9837 
9838 	/* send extension block to DMCU for parsing */
9839 	for (i = 0; i < len; i += 8) {
9840 		bool res;
9841 		int offset;
9842 
9843 		/* send 8 bytes a time */
9844 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
9845 			return false;
9846 
9847 		if (i+8 == len) {
9848 			/* EDID block sent completed, expect result */
9849 			int version, min_rate, max_rate;
9850 
9851 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
9852 			if (res) {
9853 				/* amd vsdb found */
9854 				vsdb_info->freesync_supported = 1;
9855 				vsdb_info->amd_vsdb_version = version;
9856 				vsdb_info->min_refresh_rate_hz = min_rate;
9857 				vsdb_info->max_refresh_rate_hz = max_rate;
9858 				return true;
9859 			}
9860 			/* not amd vsdb */
9861 			return false;
9862 		}
9863 
9864 		/* check for ack*/
9865 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
9866 		if (!res)
9867 			return false;
9868 	}
9869 
9870 	return false;
9871 }
9872 
9873 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
9874 		uint8_t *edid_ext, int len,
9875 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
9876 {
9877 	int i;
9878 
9879 	/* send extension block to DMCU for parsing */
9880 	for (i = 0; i < len; i += 8) {
9881 		/* send 8 bytes a time */
9882 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
9883 			return false;
9884 	}
9885 
9886 	return vsdb_info->freesync_supported;
9887 }
9888 
9889 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
9890 		uint8_t *edid_ext, int len,
9891 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
9892 {
9893 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
9894 
9895 	if (adev->dm.dmub_srv)
9896 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
9897 	else
9898 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
9899 }
9900 
9901 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
9902 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
9903 {
9904 	uint8_t *edid_ext = NULL;
9905 	int i;
9906 	bool valid_vsdb_found = false;
9907 
9908 	/*----- drm_find_cea_extension() -----*/
9909 	/* No EDID or EDID extensions */
9910 	if (edid == NULL || edid->extensions == 0)
9911 		return -ENODEV;
9912 
9913 	/* Find CEA extension */
9914 	for (i = 0; i < edid->extensions; i++) {
9915 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
9916 		if (edid_ext[0] == CEA_EXT)
9917 			break;
9918 	}
9919 
9920 	if (i == edid->extensions)
9921 		return -ENODEV;
9922 
9923 	/*----- cea_db_offsets() -----*/
9924 	if (edid_ext[0] != CEA_EXT)
9925 		return -ENODEV;
9926 
9927 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
9928 
9929 	return valid_vsdb_found ? i : -ENODEV;
9930 }
9931 
9932 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9933 					struct edid *edid)
9934 {
9935 	int i = 0;
9936 	struct detailed_timing *timing;
9937 	struct detailed_non_pixel *data;
9938 	struct detailed_data_monitor_range *range;
9939 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9940 			to_amdgpu_dm_connector(connector);
9941 	struct dm_connector_state *dm_con_state = NULL;
9942 	struct dc_sink *sink;
9943 
9944 	struct drm_device *dev = connector->dev;
9945 	struct amdgpu_device *adev = drm_to_adev(dev);
9946 	bool freesync_capable = false;
9947 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
9948 
9949 	if (!connector->state) {
9950 		DRM_ERROR("%s - Connector has no state", __func__);
9951 		goto update;
9952 	}
9953 
9954 	sink = amdgpu_dm_connector->dc_sink ?
9955 		amdgpu_dm_connector->dc_sink :
9956 		amdgpu_dm_connector->dc_em_sink;
9957 
9958 	if (!edid || !sink) {
9959 		dm_con_state = to_dm_connector_state(connector->state);
9960 
9961 		amdgpu_dm_connector->min_vfreq = 0;
9962 		amdgpu_dm_connector->max_vfreq = 0;
9963 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9964 		connector->display_info.monitor_range.min_vfreq = 0;
9965 		connector->display_info.monitor_range.max_vfreq = 0;
9966 		freesync_capable = false;
9967 
9968 		goto update;
9969 	}
9970 
9971 	dm_con_state = to_dm_connector_state(connector->state);
9972 
9973 	if (!adev->dm.freesync_module)
9974 		goto update;
9975 
9976 
9977 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9978 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
9979 		bool edid_check_required = false;
9980 
9981 		if (edid) {
9982 			edid_check_required = is_dp_capable_without_timing_msa(
9983 						adev->dm.dc,
9984 						amdgpu_dm_connector);
9985 		}
9986 
9987 		if (edid_check_required == true && (edid->version > 1 ||
9988 		   (edid->version == 1 && edid->revision > 1))) {
9989 			for (i = 0; i < 4; i++) {
9990 
9991 				timing	= &edid->detailed_timings[i];
9992 				data	= &timing->data.other_data;
9993 				range	= &data->data.range;
9994 				/*
9995 				 * Check if monitor has continuous frequency mode
9996 				 */
9997 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
9998 					continue;
9999 				/*
10000 				 * Check for flag range limits only. If flag == 1 then
10001 				 * no additional timing information provided.
10002 				 * Default GTF, GTF Secondary curve and CVT are not
10003 				 * supported
10004 				 */
10005 				if (range->flags != 1)
10006 					continue;
10007 
10008 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10009 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10010 				amdgpu_dm_connector->pixel_clock_mhz =
10011 					range->pixel_clock_mhz * 10;
10012 
10013 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10014 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10015 
10016 				break;
10017 			}
10018 
10019 			if (amdgpu_dm_connector->max_vfreq -
10020 			    amdgpu_dm_connector->min_vfreq > 10) {
10021 
10022 				freesync_capable = true;
10023 			}
10024 		}
10025 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10026 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10027 		if (i >= 0 && vsdb_info.freesync_supported) {
10028 			timing  = &edid->detailed_timings[i];
10029 			data    = &timing->data.other_data;
10030 
10031 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10032 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10033 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10034 				freesync_capable = true;
10035 
10036 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10037 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10038 		}
10039 	}
10040 
10041 update:
10042 	if (dm_con_state)
10043 		dm_con_state->freesync_capable = freesync_capable;
10044 
10045 	if (connector->vrr_capable_property)
10046 		drm_connector_set_vrr_capable_property(connector,
10047 						       freesync_capable);
10048 }
10049 
10050 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10051 {
10052 	struct amdgpu_device *adev = drm_to_adev(dev);
10053 	struct dc *dc = adev->dm.dc;
10054 	int i;
10055 
10056 	mutex_lock(&adev->dm.dc_lock);
10057 	if (dc->current_state) {
10058 		for (i = 0; i < dc->current_state->stream_count; ++i)
10059 			dc->current_state->streams[i]
10060 				->triggered_crtc_reset.enabled =
10061 				adev->dm.force_timing_sync;
10062 
10063 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10064 		dc_trigger_sync(dc, dc->current_state);
10065 	}
10066 	mutex_unlock(&adev->dm.dc_lock);
10067 }
10068 
10069 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10070 		       uint32_t value, const char *func_name)
10071 {
10072 #ifdef DM_CHECK_ADDR_0
10073 	if (address == 0) {
10074 		DC_ERR("invalid register write. address = 0");
10075 		return;
10076 	}
10077 #endif
10078 	cgs_write_register(ctx->cgs_device, address, value);
10079 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10080 }
10081 
10082 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10083 			  const char *func_name)
10084 {
10085 	uint32_t value;
10086 #ifdef DM_CHECK_ADDR_0
10087 	if (address == 0) {
10088 		DC_ERR("invalid register read; address = 0\n");
10089 		return 0;
10090 	}
10091 #endif
10092 
10093 	if (ctx->dmub_srv &&
10094 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10095 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10096 		ASSERT(false);
10097 		return 0;
10098 	}
10099 
10100 	value = cgs_read_register(ctx->cgs_device, address);
10101 
10102 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10103 
10104 	return value;
10105 }
10106 
10107 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
10108 						struct dc_context *ctx,
10109 						uint8_t status_type,
10110 						uint32_t *operation_result)
10111 {
10112 	struct amdgpu_device *adev = ctx->driver_context;
10113 	int return_status = -1;
10114 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
10115 
10116 	if (is_cmd_aux) {
10117 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
10118 			return_status = p_notify->aux_reply.length;
10119 			*operation_result = p_notify->result;
10120 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
10121 			*operation_result = AUX_RET_ERROR_TIMEOUT;
10122 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
10123 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
10124 		} else {
10125 			*operation_result = AUX_RET_ERROR_UNKNOWN;
10126 		}
10127 	} else {
10128 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
10129 			return_status = 0;
10130 			*operation_result = p_notify->sc_status;
10131 		} else {
10132 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
10133 		}
10134 	}
10135 
10136 	return return_status;
10137 }
10138 
10139 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
10140 	unsigned int link_index, void *cmd_payload, void *operation_result)
10141 {
10142 	struct amdgpu_device *adev = ctx->driver_context;
10143 	int ret = 0;
10144 
10145 	if (is_cmd_aux) {
10146 		dc_process_dmub_aux_transfer_async(ctx->dc,
10147 			link_index, (struct aux_payload *)cmd_payload);
10148 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
10149 					(struct set_config_cmd_payload *)cmd_payload,
10150 					adev->dm.dmub_notify)) {
10151 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10152 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
10153 					(uint32_t *)operation_result);
10154 	}
10155 
10156 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
10157 	if (ret == 0) {
10158 		DRM_ERROR("wait_for_completion_timeout timeout!");
10159 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10160 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
10161 				(uint32_t *)operation_result);
10162 	}
10163 
10164 	if (is_cmd_aux) {
10165 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10166 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
10167 
10168 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
10169 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10170 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
10171 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10172 				       adev->dm.dmub_notify->aux_reply.length);
10173 			}
10174 		}
10175 	}
10176 
10177 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10178 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
10179 			(uint32_t *)operation_result);
10180 }
10181 
10182 /*
10183  * Check whether seamless boot is supported.
10184  *
10185  * So far we only support seamless boot on CHIP_VANGOGH.
10186  * If everything goes well, we may consider expanding
10187  * seamless boot to other ASICs.
10188  */
10189 bool check_seamless_boot_capability(struct amdgpu_device *adev)
10190 {
10191 	switch (adev->asic_type) {
10192 	case CHIP_VANGOGH:
10193 		if (!adev->mman.keep_stolen_vga_memory)
10194 			return true;
10195 		break;
10196 	default:
10197 		break;
10198 	}
10199 
10200 	return false;
10201 }
10202