/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);

/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: the vertical blank counter, or 0 on error.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO: rework base driver to use values directly.
	 * For now parse it back into reg-format.
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

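/* Map an OTG (output timing generator) instance back to its amdgpu_crtc. */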
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* Page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of the vblank of this flip for flip throttling. We use
	 * the cooked hw counter, as that one is incremented at the start of
	 * the vblank of pageflip completion, so last_flip_vblank is the
	 * forbidden count for queueing new pageflips if vsync + VRR is
	 * enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
	drm_dev = acrtc->base.dev;
	vblank = &drm_dev->vblank[acrtc->base.index];
	previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
	frame_duration_ns = vblank->time - previous_timestamp;

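	/* Derive the instantaneous refresh rate in Hz from the frame duration. */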
	if (frame_duration_ns > 0) {
		trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
		atomic64_set(&irq_params->previous_timestamp, vblank->time);
	}

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
		      acrtc->crtc_id,
		      vrr_active);

	/* Core vblank handling is done here after the end of front-porch in
	 * VRR mode, as vblank timestamping gives valid results only after the
	 * front-porch in that mode. This also delivers any page-flip
	 * completion events that were queued to us if a pageflip happened
	 * inside the front-porch.
	 */
	if (vrr_active) {
		drm_crtc_handle_vblank(&acrtc->base);

		/* BTR processing for pre-DCE12 ASICs */
		if (acrtc->dm_irq_params.stream &&
		    adev->family < AMDGPU_FAMILY_AI) {
			spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc->dm_irq_params.stream,
				&acrtc->dm_irq_params.vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc->dm_irq_params.stream,
				&acrtc->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at the start of front-porch is only possible
	 * in non-VRR mode, as only then does vblank timestamping give valid
	 * results while inside the front-porch. Otherwise defer it to
	 * dm_vupdate_high_irq() after the end of the front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * The following must happen at the start of vblank, for CRC
	 * computation and below-the-range (BTR) support in VRR mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set the CRC window and read out the CRC value at the vertical
 * line 0 position.
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by reading DMUB notifications and
 * draining the DMUB trace buffer.
 */
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
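			/* Drain pending notifications; only the last one read is kept. */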
			do {
				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			} while (notify.pending_notification);

			if (adev->dm.dmub_notify)
				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
				complete(&adev->dm.dmub_aux_transfer_done);
			/* TODO: HPD implementation */

		} else {
			DRM_ERROR("DM: Failed to receive correct outbox IRQ!\n");
		}
	}

	do {
		if (!dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry))
			break;

		trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
						 entry.param0, entry.param1);

		DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);

		count++;
	} while (count <= DMUB_TRACE_MAX_READ);

	ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

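	/* Size the buffer for the largest supported mode, at 4 bytes per pixel. */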
	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD: idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

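	/*
	 * DMUB framebuffer window layout used below: window 0 holds the
	 * instruction constants, 2 the BSS/data, 3 a copy of the VBIOS,
	 * 4 the mailbox, 5 the trace buffer and 6 the firmware state.
	 */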
	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the fw_inst_const part of the
	 * DMUB firmware to cw0; otherwise, the firmware backdoor load is done
	 * here in dm_dmub_hw_init().
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* Backdoor-load the firmware and trigger DMUB running. */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

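	/*
	 * The MC_VM system aperture registers are programmed in 256KB units
	 * (hence the >> 18); the AGP aperture registers use 16MB units
	 * (>> 24) and the GART page table addresses use 4KB pages (>> 12).
	 */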
	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue: it cannot use VRAM that lies above
		 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, raise the
		 * system aperture high address by 1 to avoid the VM fault and
		 * hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}

static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work =
		container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

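	/* Allow MALL-based idle optimizations only while no vblank IRQs are active. */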
	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		/* Call the DC init_memory func. */
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
			goto error;
		}
		amdgpu_dm_outbox_init(adev);
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO: use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_workqueue) {
		adev->dm.vblank_workqueue->dm = NULL;
		kfree(adev->dm.vblank_workqueue);
		adev->dm.vblank_workqueue = NULL;
	}
#endif

	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case CHIP_BEIGE_GOBY:
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

1722 	for (i = 0; i < 16; i++)
1723 		linear_lut[i] = 0xFFFF * i / 15;
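	/*
	 * For reference, the loop above builds a linear 16-entry LUT spanning
	 * the full 16-bit range: i = 0 gives 0x0000, i = 7 gives
	 * 0xFFFF * 7 / 15 = 30583 = 0x7777, and i = 15 gives 0xFFFF.
	 */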
1724 
1725 	params.set = 0;
1726 	params.backlight_ramping_start = 0xCCCC;
1727 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1728 	params.backlight_lut_array_size = 16;
1729 	params.backlight_lut_array = linear_lut;
1730 
1731 	/* Min backlight level after ABM reduction; don't allow below 1%:
1732 	 * 0xFFFF * 0.01 = 0x28F
1733 	 */
1734 	params.min_abm_backlight = 0x28F;
1735 
1736 	/* In the case where ABM is implemented on dmcub,
1737 	 * the dmcu object will be NULL.
1738 	 * ABM 2.4 and up are implemented on dmcub.
1739 	 */
1740 	if (dmcu)
1741 		ret = dmcu_load_iram(dmcu, params);
1742 	else if (adev->dm.dc->ctx->dmub_srv)
1743 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1744 
1745 	if (!ret)
1746 		return -EINVAL;
1747 
1748 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1749 }
1750 
1751 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1752 {
1753 	struct amdgpu_dm_connector *aconnector;
1754 	struct drm_connector *connector;
1755 	struct drm_connector_list_iter iter;
1756 	struct drm_dp_mst_topology_mgr *mgr;
1757 	int ret;
1758 	bool need_hotplug = false;
1759 
1760 	drm_connector_list_iter_begin(dev, &iter);
1761 	drm_for_each_connector_iter(connector, &iter) {
1762 		aconnector = to_amdgpu_dm_connector(connector);
1763 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1764 		    aconnector->mst_port)
1765 			continue;
1766 
1767 		mgr = &aconnector->mst_mgr;
1768 
1769 		if (suspend) {
1770 			drm_dp_mst_topology_mgr_suspend(mgr);
1771 		} else {
1772 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1773 			if (ret < 0) {
1774 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1775 				need_hotplug = true;
1776 			}
1777 		}
1778 	}
1779 	drm_connector_list_iter_end(&iter);
1780 
1781 	if (need_hotplug)
1782 		drm_kms_helper_hotplug_event(dev);
1783 }
1784 
1785 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1786 {
1787 	struct smu_context *smu = &adev->smu;
1788 	int ret = 0;
1789 
1790 	if (!is_support_sw_smu(adev))
1791 		return 0;
1792 
1793 	/* This interface is for dGPU Navi1x. Linux dc-pplib interface depends
1794 	 * on the Windows driver dc implementation.
1795 	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1796 	 * should be passed to smu during boot up and resume from s3.
1797 	 * boot up: dc calculates dcn watermark clock settings within dc_create,
1798 	 * dcn20_resource_construct,
1799 	 * then calls the pplib functions below to pass the settings to smu:
1800 	 * smu_set_watermarks_for_clock_ranges
1801 	 * smu_set_watermarks_table
1802 	 * navi10_set_watermarks_table
1803 	 * smu_write_watermarks_table
1804 	 *
1805 	 * For Renoir, clock settings of dcn watermarks are also fixed values.
1806 	 * dc has implemented a different flow for the Windows driver:
1807 	 * dc_hardware_init / dc_set_power_state
1808 	 * dcn10_init_hw
1809 	 * notify_wm_ranges
1810 	 * set_wm_ranges
1811 	 * -- Linux
1812 	 * smu_set_watermarks_for_clock_ranges
1813 	 * renoir_set_watermarks_table
1814 	 * smu_write_watermarks_table
1815 	 *
1816 	 * For Linux,
1817 	 * dc_hardware_init -> amdgpu_dm_init
1818 	 * dc_set_power_state --> dm_resume
1819 	 *
1820 	 * therefore, this function applies to navi10/12/14
1821 	 * but not Renoir.
1822 	 */
1823 	switch (adev->asic_type) {
1824 	case CHIP_NAVI10:
1825 	case CHIP_NAVI14:
1826 	case CHIP_NAVI12:
1827 		break;
1828 	default:
1829 		return 0;
1830 	}
1831 
1832 	ret = smu_write_watermarks_table(smu);
1833 	if (ret) {
1834 		DRM_ERROR("Failed to update WMTABLE!\n");
1835 		return ret;
1836 	}
1837 
1838 	return 0;
1839 }
1840 
1841 /**
1842  * dm_hw_init() - Initialize DC device
1843  * @handle: The base driver device containing the amdgpu_dm device.
1844  *
1845  * Initialize the &struct amdgpu_display_manager device. This involves calling
1846  * the initializers of each DM component, then populating the struct with them.
1847  *
1848  * Although the function implies hardware initialization, both hardware and
1849  * software are initialized here. Splitting them out to their relevant init
1850  * hooks is a future TODO item.
1851  *
1852  * Some notable things that are initialized here:
1853  *
1854  * - Display Core, both software and hardware
1855  * - DC modules that we need (freesync and color management)
1856  * - DRM software states
1857  * - Interrupt sources and handlers
1858  * - Vblank support
1859  * - Debug FS entries, if enabled
1860  */
1861 static int dm_hw_init(void *handle)
1862 {
1863 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1864 	/* Create DAL display manager */
1865 	amdgpu_dm_init(adev);
1866 	amdgpu_dm_hpd_init(adev);
1867 
1868 	return 0;
1869 }
1870 
1871 /**
1872  * dm_hw_fini() - Teardown DC device
1873  * @handle: The base driver device containing the amdgpu_dm device.
1874  *
1875  * Teardown components within &struct amdgpu_display_manager that require
1876  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1877  * were loaded. Also flush IRQ workqueues and disable them.
1878  */
1879 static int dm_hw_fini(void *handle)
1880 {
1881 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1882 
1883 	amdgpu_dm_hpd_fini(adev);
1884 
1885 	amdgpu_dm_irq_fini(adev);
1886 	amdgpu_dm_fini(adev);
1887 	return 0;
1888 }
1889 
1890 
1891 static int dm_enable_vblank(struct drm_crtc *crtc);
1892 static void dm_disable_vblank(struct drm_crtc *crtc);
1893 
1894 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1895 				 struct dc_state *state, bool enable)
1896 {
1897 	enum dc_irq_source irq_source;
1898 	struct amdgpu_crtc *acrtc;
1899 	int rc = -EBUSY;
1900 	int i = 0;
1901 
1902 	for (i = 0; i < state->stream_count; i++) {
1903 		acrtc = get_crtc_by_otg_inst(
1904 				adev, state->stream_status[i].primary_otg_inst);
1905 
1906 		if (acrtc && state->stream_status[i].plane_count != 0) {
1907 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1908 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1909 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1910 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1911 			if (rc)
1912 				DRM_WARN("Failed to %s pflip interrupts\n",
1913 					 enable ? "enable" : "disable");
1914 
1915 			if (enable) {
1916 				rc = dm_enable_vblank(&acrtc->base);
1917 				if (rc)
1918 					DRM_WARN("Failed to enable vblank interrupts\n");
1919 			} else {
1920 				dm_disable_vblank(&acrtc->base);
1921 			}
1922 
1923 		}
1924 	}
1925 
1926 }
1927 
1928 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1929 {
1930 	struct dc_state *context = NULL;
1931 	enum dc_status res = DC_ERROR_UNEXPECTED;
1932 	int i;
1933 	struct dc_stream_state *del_streams[MAX_PIPES];
1934 	int del_streams_count = 0;
1935 
1936 	memset(del_streams, 0, sizeof(del_streams));
1937 
1938 	context = dc_create_state(dc);
1939 	if (context == NULL)
1940 		goto context_alloc_fail;
1941 
1942 	dc_resource_state_copy_construct_current(dc, context);
1943 
1944 	/* First remove from context all streams */
1945 	for (i = 0; i < context->stream_count; i++) {
1946 		struct dc_stream_state *stream = context->streams[i];
1947 
1948 		del_streams[del_streams_count++] = stream;
1949 	}
1950 
1951 	/* Remove all planes for removed streams and then remove the streams */
1952 	for (i = 0; i < del_streams_count; i++) {
1953 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1954 			res = DC_FAIL_DETACH_SURFACES;
1955 			goto fail;
1956 		}
1957 
1958 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1959 		if (res != DC_OK)
1960 			goto fail;
1961 	}
1962 
1963 
1964 	res = dc_validate_global_state(dc, context, false);
1965 
1966 	if (res != DC_OK) {
1967 		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1968 		goto fail;
1969 	}
1970 
1971 	res = dc_commit_state(dc, context);
1972 
1973 fail:
1974 	dc_release_state(context);
1975 
1976 context_alloc_fail:
1977 	return res;
1978 }
1979 
1980 static int dm_suspend(void *handle)
1981 {
1982 	struct amdgpu_device *adev = handle;
1983 	struct amdgpu_display_manager *dm = &adev->dm;
1984 	int ret = 0;
1985 
1986 	if (amdgpu_in_reset(adev)) {
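		/*
		 * Note: dc_lock is intentionally left held on this path;
		 * dm_resume() releases it once the reset completes.
		 */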
1987 		mutex_lock(&dm->dc_lock);
1988 
1989 #if defined(CONFIG_DRM_AMD_DC_DCN)
1990 		dc_allow_idle_optimizations(adev->dm.dc, false);
1991 #endif
1992 
1993 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1994 
1995 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1996 
1997 		amdgpu_dm_commit_zero_streams(dm->dc);
1998 
1999 		amdgpu_dm_irq_suspend(adev);
2000 
2001 		return ret;
2002 	}
2003 
2004 	WARN_ON(adev->dm.cached_state);
2005 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2006 
2007 	s3_handle_mst(adev_to_drm(adev), true);
2008 
2009 	amdgpu_dm_irq_suspend(adev);
2010 
2011 
2012 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2013 
2014 	return 0;
2015 }
2016 
2017 static struct amdgpu_dm_connector *
2018 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2019 					     struct drm_crtc *crtc)
2020 {
2021 	uint32_t i;
2022 	struct drm_connector_state *new_con_state;
2023 	struct drm_connector *connector;
2024 	struct drm_crtc *crtc_from_state;
2025 
2026 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2027 		crtc_from_state = new_con_state->crtc;
2028 
2029 		if (crtc_from_state == crtc)
2030 			return to_amdgpu_dm_connector(connector);
2031 	}
2032 
2033 	return NULL;
2034 }
2035 
2036 static void emulated_link_detect(struct dc_link *link)
2037 {
2038 	struct dc_sink_init_data sink_init_data = { 0 };
2039 	struct display_sink_capability sink_caps = { 0 };
2040 	enum dc_edid_status edid_status;
2041 	struct dc_context *dc_ctx = link->ctx;
2042 	struct dc_sink *sink = NULL;
2043 	struct dc_sink *prev_sink = NULL;
2044 
2045 	link->type = dc_connection_none;
2046 	prev_sink = link->local_sink;
2047 
2048 	if (prev_sink)
2049 		dc_sink_release(prev_sink);
2050 
2051 	switch (link->connector_signal) {
2052 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2053 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2054 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2055 		break;
2056 	}
2057 
2058 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2059 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2060 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2061 		break;
2062 	}
2063 
2064 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2065 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2066 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2067 		break;
2068 	}
2069 
2070 	case SIGNAL_TYPE_LVDS: {
2071 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2072 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2073 		break;
2074 	}
2075 
2076 	case SIGNAL_TYPE_EDP: {
2077 		sink_caps.transaction_type =
2078 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2079 		sink_caps.signal = SIGNAL_TYPE_EDP;
2080 		break;
2081 	}
2082 
2083 	case SIGNAL_TYPE_DISPLAY_PORT: {
2084 		sink_caps.transaction_type =
2085 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2086 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2087 		break;
2088 	}
2089 
2090 	default:
2091 		DC_ERROR("Invalid connector type! signal:%d\n",
2092 			link->connector_signal);
2093 		return;
2094 	}
2095 
2096 	sink_init_data.link = link;
2097 	sink_init_data.sink_signal = sink_caps.signal;
2098 
2099 	sink = dc_sink_create(&sink_init_data);
2100 	if (!sink) {
2101 		DC_ERROR("Failed to create sink!\n");
2102 		return;
2103 	}
2104 
2105 	/* dc_sink_create returns a new reference */
2106 	link->local_sink = sink;
2107 
2108 	edid_status = dm_helpers_read_local_edid(
2109 			link->ctx,
2110 			link,
2111 			sink);
2112 
2113 	if (edid_status != EDID_OK)
2114 		DC_ERROR("Failed to read EDID\n");
2115 
2116 }
2117 
2118 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2119 				     struct amdgpu_display_manager *dm)
2120 {
2121 	struct {
2122 		struct dc_surface_update surface_updates[MAX_SURFACES];
2123 		struct dc_plane_info plane_infos[MAX_SURFACES];
2124 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2125 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2126 		struct dc_stream_update stream_update;
2127 	} *bundle;
2128 	int k, m;
2129 
2130 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2131 
2132 	if (!bundle) {
2133 		dm_error("Failed to allocate update bundle\n");
2134 		goto cleanup;
2135 	}
2136 
2137 	for (k = 0; k < dc_state->stream_count; k++) {
2138 		bundle->stream_update.stream = dc_state->streams[k];
2139 
2140 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2141 			bundle->surface_updates[m].surface =
2142 				dc_state->stream_status->plane_states[m];
2143 			bundle->surface_updates[m].surface->force_full_update =
2144 				true;
2145 		}
2146 		dc_commit_updates_for_stream(
2147 			dm->dc, bundle->surface_updates,
2148 			dc_state->stream_status->plane_count,
2149 			dc_state->streams[k], &bundle->stream_update, dc_state);
2150 	}
2151 
2152 cleanup:
2153 	kfree(bundle);
2154 
2155 	return;
2156 }
2157 
2158 static void dm_set_dpms_off(struct dc_link *link)
2159 {
2160 	struct dc_stream_state *stream_state;
2161 	struct amdgpu_dm_connector *aconnector = link->priv;
2162 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2163 	struct dc_stream_update stream_update;
2164 	bool dpms_off = true;
2165 
2166 	memset(&stream_update, 0, sizeof(stream_update));
2167 	stream_update.dpms_off = &dpms_off;
2168 
2169 	mutex_lock(&adev->dm.dc_lock);
2170 	stream_state = dc_stream_find_from_link(link);
2171 
2172 	if (stream_state == NULL) {
2173 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2174 		mutex_unlock(&adev->dm.dc_lock);
2175 		return;
2176 	}
2177 
2178 	stream_update.stream = stream_state;
2179 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2180 				     stream_state, &stream_update,
2181 				     stream_state->ctx->dc->current_state);
2182 	mutex_unlock(&adev->dm.dc_lock);
2183 }
2184 
2185 static int dm_resume(void *handle)
2186 {
2187 	struct amdgpu_device *adev = handle;
2188 	struct drm_device *ddev = adev_to_drm(adev);
2189 	struct amdgpu_display_manager *dm = &adev->dm;
2190 	struct amdgpu_dm_connector *aconnector;
2191 	struct drm_connector *connector;
2192 	struct drm_connector_list_iter iter;
2193 	struct drm_crtc *crtc;
2194 	struct drm_crtc_state *new_crtc_state;
2195 	struct dm_crtc_state *dm_new_crtc_state;
2196 	struct drm_plane *plane;
2197 	struct drm_plane_state *new_plane_state;
2198 	struct dm_plane_state *dm_new_plane_state;
2199 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2200 	enum dc_connection_type new_connection_type = dc_connection_none;
2201 	struct dc_state *dc_state;
2202 	int i, r, j;
2203 
2204 	if (amdgpu_in_reset(adev)) {
2205 		dc_state = dm->cached_dc_state;
2206 
2207 		r = dm_dmub_hw_init(adev);
2208 		if (r)
2209 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2210 
2211 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2212 		dc_resume(dm->dc);
2213 
2214 		amdgpu_dm_irq_resume_early(adev);
2215 
2216 		for (i = 0; i < dc_state->stream_count; i++) {
2217 			dc_state->streams[i]->mode_changed = true;
2218 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2219 				dc_state->stream_status->plane_states[j]->update_flags.raw
2220 					= 0xffffffff;
2221 			}
2222 		}
2223 
2224 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2225 
2226 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2227 
2228 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2229 
2230 		dc_release_state(dm->cached_dc_state);
2231 		dm->cached_dc_state = NULL;
2232 
2233 		amdgpu_dm_irq_resume_late(adev);
2234 
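		/* Release the dc_lock taken in dm_suspend() on the GPU-reset path. */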
2235 		mutex_unlock(&dm->dc_lock);
2236 
2237 		return 0;
2238 	}
2239 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2240 	dc_release_state(dm_state->context);
2241 	dm_state->context = dc_create_state(dm->dc);
2242 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2243 	dc_resource_state_construct(dm->dc, dm_state->context);
2244 
2245 	/* Before powering on DC we need to re-initialize DMUB. */
2246 	r = dm_dmub_hw_init(adev);
2247 	if (r)
2248 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2249 
2250 	/* power on hardware */
2251 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2252 
2253 	/* program HPD filter */
2254 	dc_resume(dm->dc);
2255 
2256 	/*
2257 	 * early enable HPD Rx IRQ, should be done before set mode as short
2258 	 * pulse interrupts are used for MST
2259 	 */
2260 	amdgpu_dm_irq_resume_early(adev);
2261 
2262 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2263 	s3_handle_mst(ddev, false);
2264 
2265 	/* Do detection */
2266 	drm_connector_list_iter_begin(ddev, &iter);
2267 	drm_for_each_connector_iter(connector, &iter) {
2268 		aconnector = to_amdgpu_dm_connector(connector);
2269 
2270 		/*
2271 		 * This is the case when traversing through already created
2272 		 * MST connectors; they should be skipped.
2273 		 */
2274 		if (aconnector->mst_port)
2275 			continue;
2276 
2277 		mutex_lock(&aconnector->hpd_lock);
2278 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2279 			DRM_ERROR("KMS: Failed to detect connector\n");
2280 
2281 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2282 			emulated_link_detect(aconnector->dc_link);
2283 		else
2284 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2285 
2286 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2287 			aconnector->fake_enable = false;
2288 
2289 		if (aconnector->dc_sink)
2290 			dc_sink_release(aconnector->dc_sink);
2291 		aconnector->dc_sink = NULL;
2292 		amdgpu_dm_update_connector_after_detect(aconnector);
2293 		mutex_unlock(&aconnector->hpd_lock);
2294 	}
2295 	drm_connector_list_iter_end(&iter);
2296 
2297 	/* Force mode set in atomic commit */
2298 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2299 		new_crtc_state->active_changed = true;
2300 
2301 	/*
2302 	 * atomic_check is expected to create the dc states. We need to release
2303 	 * them here, since they were duplicated as part of the suspend
2304 	 * procedure.
2305 	 */
2306 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2307 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2308 		if (dm_new_crtc_state->stream) {
2309 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2310 			dc_stream_release(dm_new_crtc_state->stream);
2311 			dm_new_crtc_state->stream = NULL;
2312 		}
2313 	}
2314 
2315 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2316 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2317 		if (dm_new_plane_state->dc_state) {
2318 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2319 			dc_plane_state_release(dm_new_plane_state->dc_state);
2320 			dm_new_plane_state->dc_state = NULL;
2321 		}
2322 	}
2323 
2324 	drm_atomic_helper_resume(ddev, dm->cached_state);
2325 
2326 	dm->cached_state = NULL;
2327 
2328 	amdgpu_dm_irq_resume_late(adev);
2329 
2330 	amdgpu_dm_smu_write_watermarks_table(adev);
2331 
2332 	return 0;
2333 }
2334 
2335 /**
2336  * DOC: DM Lifecycle
2337  *
2338  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2339  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2340  * the base driver's device list to be initialized and torn down accordingly.
2341  *
2342  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2343  */
2344 
2345 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2346 	.name = "dm",
2347 	.early_init = dm_early_init,
2348 	.late_init = dm_late_init,
2349 	.sw_init = dm_sw_init,
2350 	.sw_fini = dm_sw_fini,
2351 	.early_fini = amdgpu_dm_early_fini,
2352 	.hw_init = dm_hw_init,
2353 	.hw_fini = dm_hw_fini,
2354 	.suspend = dm_suspend,
2355 	.resume = dm_resume,
2356 	.is_idle = dm_is_idle,
2357 	.wait_for_idle = dm_wait_for_idle,
2358 	.check_soft_reset = dm_check_soft_reset,
2359 	.soft_reset = dm_soft_reset,
2360 	.set_clockgating_state = dm_set_clockgating_state,
2361 	.set_powergating_state = dm_set_powergating_state,
2362 };
2363 
2364 const struct amdgpu_ip_block_version dm_ip_block =
2365 {
2366 	.type = AMD_IP_BLOCK_TYPE_DCE,
2367 	.major = 1,
2368 	.minor = 0,
2369 	.rev = 0,
2370 	.funcs = &amdgpu_dm_funcs,
2371 };
2372 
2373 
2374 /**
2375  * DOC: atomic
2376  *
2377  * *WIP*
2378  */
2379 
2380 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2381 	.fb_create = amdgpu_display_user_framebuffer_create,
2382 	.get_format_info = amd_get_format_info,
2383 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2384 	.atomic_check = amdgpu_dm_atomic_check,
2385 	.atomic_commit = drm_atomic_helper_commit,
2386 };
2387 
2388 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2389 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2390 };
2391 
2392 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2393 {
2394 	u32 max_cll, min_cll, max, min, q, r;
2395 	struct amdgpu_dm_backlight_caps *caps;
2396 	struct amdgpu_display_manager *dm;
2397 	struct drm_connector *conn_base;
2398 	struct amdgpu_device *adev;
2399 	struct dc_link *link = NULL;
2400 	static const u8 pre_computed_values[] = {
2401 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2402 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2403 
2404 	if (!aconnector || !aconnector->dc_link)
2405 		return;
2406 
2407 	link = aconnector->dc_link;
2408 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2409 		return;
2410 
2411 	conn_base = &aconnector->base;
2412 	adev = drm_to_adev(conn_base->dev);
2413 	dm = &adev->dm;
2414 	caps = &dm->backlight_caps;
2415 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2416 	caps->aux_support = false;
2417 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2418 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2419 
2420 	if (caps->ext_caps->bits.oled == 1 ||
2421 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2422 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2423 		caps->aux_support = true;
2424 
2425 	if (amdgpu_backlight == 0)
2426 		caps->aux_support = false;
2427 	else if (amdgpu_backlight == 1)
2428 		caps->aux_support = true;
2429 
2430 	/* From the specification (CTA-861-G), for calculating the maximum
2431 	 * luminance we need to use:
2432 	 *	Luminance = 50*2**(CV/32)
2433 	 * where CV is a one-byte value.
2434 	 * Calculating this expression directly would need floating-point
2435 	 * precision; to avoid that complexity, we take advantage of the fact
2436 	 * that CV is divided by a constant. From Euclid's division algorithm,
2437 	 * we know that CV can be written as CV = 32*q + r. Replacing CV in the
2438 	 * Luminance expression gives 50*(2**q)*(2**(r/32)), hence we just
2439 	 * need to pre-compute the values of 50*2**(r/32). For that we used the
2440 	 * following Ruby line:
2441 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2442 	 * The results of the above expression can be verified against
2443 	 * pre_computed_values.
2444 	 */
2445 	q = max_cll >> 5;
2446 	r = max_cll % 32;
2447 	max = (1 << q) * pre_computed_values[r];
2448 
2449 	// min luminance: maxLum * (CV/255)^2 / 100
2450 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2451 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
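	/*
	 * Worked example for the integer approximation above: for
	 * max_cll = 200, q = 200 >> 5 = 6 and r = 200 % 32 = 8, so
	 * max = (1 << 6) * pre_computed_values[8] = 64 * 59 = 3776, close to
	 * the exact 50*2**(200/32) ~= 3805. Note that with a one-byte min_cll,
	 * as the comment above assumes, q = DIV_ROUND_CLOSEST(min_cll, 255) is
	 * at most 1, so DIV_ROUND_CLOSEST(q * q, 100) rounds to 0 and the
	 * computed min collapses to 0.
	 */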
2452 
2453 	caps->aux_max_input_signal = max;
2454 	caps->aux_min_input_signal = min;
2455 }
2456 
2457 void amdgpu_dm_update_connector_after_detect(
2458 		struct amdgpu_dm_connector *aconnector)
2459 {
2460 	struct drm_connector *connector = &aconnector->base;
2461 	struct drm_device *dev = connector->dev;
2462 	struct dc_sink *sink;
2463 
2464 	/* MST handled by drm_mst framework */
2465 	if (aconnector->mst_mgr.mst_state)
2466 		return;
2467 
2468 	sink = aconnector->dc_link->local_sink;
2469 	if (sink)
2470 		dc_sink_retain(sink);
2471 
2472 	/*
2473 	 * An EDID-managed connector gets its first update only in the mode_valid
2474 	 * hook; the connector sink is then set to either a fake or a physical
2475 	 * sink depending on link status. Skip if already done during boot.
2476 	 */
2477 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2478 			&& aconnector->dc_em_sink) {
2479 
2480 		/*
2481 		 * For headless S3 resume, use dc_em_sink to fake the stream
2482 		 * because on resume connector->sink is set to NULL.
2483 		 */
2484 		mutex_lock(&dev->mode_config.mutex);
2485 
2486 		if (sink) {
2487 			if (aconnector->dc_sink) {
2488 				amdgpu_dm_update_freesync_caps(connector, NULL);
2489 				/*
2490 				 * The retain and release below bump up the sink's
2491 				 * refcount because the link no longer points to it
2492 				 * after disconnect; otherwise, on the next crtc-to-connector
2493 				 * reshuffle by the UMD we would hit an unwanted dc_sink release.
2494 				 */
2495 				dc_sink_release(aconnector->dc_sink);
2496 			}
2497 			aconnector->dc_sink = sink;
2498 			dc_sink_retain(aconnector->dc_sink);
2499 			amdgpu_dm_update_freesync_caps(connector,
2500 					aconnector->edid);
2501 		} else {
2502 			amdgpu_dm_update_freesync_caps(connector, NULL);
2503 			if (!aconnector->dc_sink) {
2504 				aconnector->dc_sink = aconnector->dc_em_sink;
2505 				dc_sink_retain(aconnector->dc_sink);
2506 			}
2507 		}
2508 
2509 		mutex_unlock(&dev->mode_config.mutex);
2510 
2511 		if (sink)
2512 			dc_sink_release(sink);
2513 		return;
2514 	}
2515 
2516 	/*
2517 	 * TODO: temporary guard while looking for a proper fix.
2518 	 * If this sink is an MST sink, we should not do anything.
2519 	 */
2520 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2521 		dc_sink_release(sink);
2522 		return;
2523 	}
2524 
2525 	if (aconnector->dc_sink == sink) {
2526 		/*
2527 		 * We got a DP short pulse (Link Loss, DP CTS, etc.).
2528 		 * Do nothing!!
2529 		 */
2530 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2531 				aconnector->connector_id);
2532 		if (sink)
2533 			dc_sink_release(sink);
2534 		return;
2535 	}
2536 
2537 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2538 		aconnector->connector_id, aconnector->dc_sink, sink);
2539 
2540 	mutex_lock(&dev->mode_config.mutex);
2541 
2542 	/*
2543 	 * 1. Update status of the drm connector
2544 	 * 2. Send an event and let userspace tell us what to do
2545 	 */
2546 	if (sink) {
2547 		/*
2548 		 * TODO: check if we still need the S3 mode update workaround.
2549 		 * If yes, put it here.
2550 		 */
2551 		if (aconnector->dc_sink) {
2552 			amdgpu_dm_update_freesync_caps(connector, NULL);
2553 			dc_sink_release(aconnector->dc_sink);
2554 		}
2555 
2556 		aconnector->dc_sink = sink;
2557 		dc_sink_retain(aconnector->dc_sink);
2558 		if (sink->dc_edid.length == 0) {
2559 			aconnector->edid = NULL;
2560 			if (aconnector->dc_link->aux_mode) {
2561 				drm_dp_cec_unset_edid(
2562 					&aconnector->dm_dp_aux.aux);
2563 			}
2564 		} else {
2565 			aconnector->edid =
2566 				(struct edid *)sink->dc_edid.raw_edid;
2567 
2568 			drm_connector_update_edid_property(connector,
2569 							   aconnector->edid);
2570 			if (aconnector->dc_link->aux_mode)
2571 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2572 						    aconnector->edid);
2573 		}
2574 
2575 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2576 		update_connector_ext_caps(aconnector);
2577 	} else {
2578 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2579 		amdgpu_dm_update_freesync_caps(connector, NULL);
2580 		drm_connector_update_edid_property(connector, NULL);
2581 		aconnector->num_modes = 0;
2582 		dc_sink_release(aconnector->dc_sink);
2583 		aconnector->dc_sink = NULL;
2584 		aconnector->edid = NULL;
2585 #ifdef CONFIG_DRM_AMD_DC_HDCP
2586 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2587 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2588 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2589 #endif
2590 	}
2591 
2592 	mutex_unlock(&dev->mode_config.mutex);
2593 
2594 	update_subconnector_property(aconnector);
2595 
2596 	if (sink)
2597 		dc_sink_release(sink);
2598 }
2599 
2600 static void handle_hpd_irq(void *param)
2601 {
2602 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2603 	struct drm_connector *connector = &aconnector->base;
2604 	struct drm_device *dev = connector->dev;
2605 	enum dc_connection_type new_connection_type = dc_connection_none;
2606 	struct amdgpu_device *adev = drm_to_adev(dev);
2607 #ifdef CONFIG_DRM_AMD_DC_HDCP
2608 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2609 #endif
2610 
2611 	if (adev->dm.disable_hpd_irq)
2612 		return;
2613 
2614 	/*
2615 	 * In case of failure, or in the MST case, there is no need to update the
2616 	 * connector status or notify the OS, since MST handles this in its own context.
2617 	 */
2618 	mutex_lock(&aconnector->hpd_lock);
2619 
2620 #ifdef CONFIG_DRM_AMD_DC_HDCP
2621 	if (adev->dm.hdcp_workqueue) {
2622 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2623 		dm_con_state->update_hdcp = true;
2624 	}
2625 #endif
2626 	if (aconnector->fake_enable)
2627 		aconnector->fake_enable = false;
2628 
2629 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2630 		DRM_ERROR("KMS: Failed to detect connector\n");
2631 
2632 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2633 		emulated_link_detect(aconnector->dc_link);
2634 
2635 
2636 		drm_modeset_lock_all(dev);
2637 		dm_restore_drm_connector_state(dev, connector);
2638 		drm_modeset_unlock_all(dev);
2639 
2640 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2641 			drm_kms_helper_hotplug_event(dev);
2642 
2643 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2644 		if (new_connection_type == dc_connection_none &&
2645 		    aconnector->dc_link->type == dc_connection_none)
2646 			dm_set_dpms_off(aconnector->dc_link);
2647 
2648 		amdgpu_dm_update_connector_after_detect(aconnector);
2649 
2650 		drm_modeset_lock_all(dev);
2651 		dm_restore_drm_connector_state(dev, connector);
2652 		drm_modeset_unlock_all(dev);
2653 
2654 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2655 			drm_kms_helper_hotplug_event(dev);
2656 	}
2657 	mutex_unlock(&aconnector->hpd_lock);
2658 
2659 }
2660 
2661 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2662 {
2663 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2664 	uint8_t dret;
2665 	bool new_irq_handled = false;
2666 	int dpcd_addr;
2667 	int dpcd_bytes_to_read;
2668 
2669 	const int max_process_count = 30;
2670 	int process_count = 0;
2671 
2672 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2673 
2674 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2675 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2676 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2677 		dpcd_addr = DP_SINK_COUNT;
2678 	} else {
2679 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2680 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2681 		dpcd_addr = DP_SINK_COUNT_ESI;
2682 	}
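	/*
	 * With the register definitions from drm_dp_helper.h, the selection
	 * above works out to DP_SINK_COUNT_ESI = 0x2002 and
	 * DP_PSR_ERROR_STATUS = 0x2006, i.e. a 4-byte read covering DPCD
	 * 0x2002 - 0x2005, while the pre-1.2 path reads 2 bytes starting at
	 * DP_SINK_COUNT = 0x200.
	 */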
2683 
2684 	dret = drm_dp_dpcd_read(
2685 		&aconnector->dm_dp_aux.aux,
2686 		dpcd_addr,
2687 		esi,
2688 		dpcd_bytes_to_read);
2689 
2690 	while (dret == dpcd_bytes_to_read &&
2691 		process_count < max_process_count) {
2692 		uint8_t retry;
2693 		dret = 0;
2694 
2695 		process_count++;
2696 
2697 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2698 		/* handle HPD short pulse irq */
2699 		if (aconnector->mst_mgr.mst_state)
2700 			drm_dp_mst_hpd_irq(
2701 				&aconnector->mst_mgr,
2702 				esi,
2703 				&new_irq_handled);
2704 
2705 		if (new_irq_handled) {
2706 			/* ACK at DPCD to notify downstream */
2707 			const int ack_dpcd_bytes_to_write =
2708 				dpcd_bytes_to_read - 1;
2709 
2710 			for (retry = 0; retry < 3; retry++) {
2711 				uint8_t wret;
2712 
2713 				wret = drm_dp_dpcd_write(
2714 					&aconnector->dm_dp_aux.aux,
2715 					dpcd_addr + 1,
2716 					&esi[1],
2717 					ack_dpcd_bytes_to_write);
2718 				if (wret == ack_dpcd_bytes_to_write)
2719 					break;
2720 			}
2721 
2722 			/* Check if there is a new irq to be handled */
2723 			dret = drm_dp_dpcd_read(
2724 				&aconnector->dm_dp_aux.aux,
2725 				dpcd_addr,
2726 				esi,
2727 				dpcd_bytes_to_read);
2728 
2729 			new_irq_handled = false;
2730 		} else {
2731 			break;
2732 		}
2733 	}
2734 
2735 	if (process_count == max_process_count)
2736 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2737 }
2738 
2739 static void handle_hpd_rx_irq(void *param)
2740 {
2741 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2742 	struct drm_connector *connector = &aconnector->base;
2743 	struct drm_device *dev = connector->dev;
2744 	struct dc_link *dc_link = aconnector->dc_link;
2745 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2746 	bool result = false;
2747 	enum dc_connection_type new_connection_type = dc_connection_none;
2748 	struct amdgpu_device *adev = drm_to_adev(dev);
2749 	union hpd_irq_data hpd_irq_data;
2750 
2751 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2752 
2753 	if (adev->dm.disable_hpd_irq)
2754 		return;
2755 
2756 
2757 	/*
2758 	 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
2759 	 * conflict; once the i2c helper is implemented, this mutex should be
2760 	 * retired.
2761 	 */
2762 	mutex_lock(&aconnector->hpd_lock);
2763 
2764 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2765 
2766 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2767 		(dc_link->type == dc_connection_mst_branch)) {
2768 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2769 			result = true;
2770 			dm_handle_hpd_rx_irq(aconnector);
2771 			goto out;
2772 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2773 			result = false;
2774 			dm_handle_hpd_rx_irq(aconnector);
2775 			goto out;
2776 		}
2777 	}
2778 
2779 	if (!amdgpu_in_reset(adev)) {
2780 		mutex_lock(&adev->dm.dc_lock);
2781 #ifdef CONFIG_DRM_AMD_DC_HDCP
2782 		result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2783 #else
2784 		result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2785 #endif
2786 		mutex_unlock(&adev->dm.dc_lock);
2787 	}
2788 
2789 out:
2790 	if (result && !is_mst_root_connector) {
2791 		/* Downstream Port status changed. */
2792 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2793 			DRM_ERROR("KMS: Failed to detect connector\n");
2794 
2795 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2796 			emulated_link_detect(dc_link);
2797 
2798 			if (aconnector->fake_enable)
2799 				aconnector->fake_enable = false;
2800 
2801 			amdgpu_dm_update_connector_after_detect(aconnector);
2802 
2803 
2804 			drm_modeset_lock_all(dev);
2805 			dm_restore_drm_connector_state(dev, connector);
2806 			drm_modeset_unlock_all(dev);
2807 
2808 			drm_kms_helper_hotplug_event(dev);
2809 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2810 
2811 			if (aconnector->fake_enable)
2812 				aconnector->fake_enable = false;
2813 
2814 			amdgpu_dm_update_connector_after_detect(aconnector);
2815 
2816 
2817 			drm_modeset_lock_all(dev);
2818 			dm_restore_drm_connector_state(dev, connector);
2819 			drm_modeset_unlock_all(dev);
2820 
2821 			drm_kms_helper_hotplug_event(dev);
2822 		}
2823 	}
2824 #ifdef CONFIG_DRM_AMD_DC_HDCP
2825 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2826 		if (adev->dm.hdcp_workqueue)
2827 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2828 	}
2829 #endif
2830 
2831 	if (dc_link->type != dc_connection_mst_branch)
2832 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2833 
2834 	mutex_unlock(&aconnector->hpd_lock);
2835 }
2836 
2837 static void register_hpd_handlers(struct amdgpu_device *adev)
2838 {
2839 	struct drm_device *dev = adev_to_drm(adev);
2840 	struct drm_connector *connector;
2841 	struct amdgpu_dm_connector *aconnector;
2842 	const struct dc_link *dc_link;
2843 	struct dc_interrupt_params int_params = {0};
2844 
2845 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2846 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2847 
2848 	list_for_each_entry(connector,
2849 			&dev->mode_config.connector_list, head)	{
2850 
2851 		aconnector = to_amdgpu_dm_connector(connector);
2852 		dc_link = aconnector->dc_link;
2853 
2854 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2855 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2856 			int_params.irq_source = dc_link->irq_source_hpd;
2857 
2858 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2859 					handle_hpd_irq,
2860 					(void *) aconnector);
2861 		}
2862 
2863 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2864 
2865 			/* Also register for DP short pulse (hpd_rx). */
2866 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2867 			int_params.irq_source = dc_link->irq_source_hpd_rx;
2868 
2869 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2870 					handle_hpd_rx_irq,
2871 					(void *) aconnector);
2872 		}
2873 	}
2874 }
2875 
2876 #if defined(CONFIG_DRM_AMD_DC_SI)
2877 /* Register IRQ sources and initialize IRQ callbacks */
2878 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2879 {
2880 	struct dc *dc = adev->dm.dc;
2881 	struct common_irq_params *c_irq_params;
2882 	struct dc_interrupt_params int_params = {0};
2883 	int r;
2884 	int i;
2885 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2886 
2887 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2888 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2889 
2890 	/*
2891 	 * Actions of amdgpu_irq_add_id():
2892 	 * 1. Register a set() function with base driver.
2893 	 *    Base driver will call set() function to enable/disable an
2894 	 *    interrupt in DC hardware.
2895 	 * 2. Register amdgpu_dm_irq_handler().
2896 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2897 	 *    coming from DC hardware.
2898 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2899 	 *    for acknowledging and handling. */
2900 
2901 	/* Use VBLANK interrupt */
2902 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2903 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2904 		if (r) {
2905 			DRM_ERROR("Failed to add crtc irq id!\n");
2906 			return r;
2907 		}
2908 
2909 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2910 		int_params.irq_source =
2911 			dc_interrupt_to_irq_source(dc, i + 1, 0);
2912 
2913 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2914 
2915 		c_irq_params->adev = adev;
2916 		c_irq_params->irq_src = int_params.irq_source;
2917 
2918 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2919 				dm_crtc_high_irq, c_irq_params);
2920 	}
2921 
2922 	/* Use GRPH_PFLIP interrupt */
2923 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2924 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2925 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2926 		if (r) {
2927 			DRM_ERROR("Failed to add page flip irq id!\n");
2928 			return r;
2929 		}
2930 
2931 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2932 		int_params.irq_source =
2933 			dc_interrupt_to_irq_source(dc, i, 0);
2934 
2935 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2936 
2937 		c_irq_params->adev = adev;
2938 		c_irq_params->irq_src = int_params.irq_source;
2939 
2940 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2941 				dm_pflip_high_irq, c_irq_params);
2942 
2943 	}
2944 
2945 	/* HPD */
2946 	r = amdgpu_irq_add_id(adev, client_id,
2947 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2948 	if (r) {
2949 		DRM_ERROR("Failed to add hpd irq id!\n");
2950 		return r;
2951 	}
2952 
2953 	register_hpd_handlers(adev);
2954 
2955 	return 0;
2956 }
2957 #endif
2958 
2959 /* Register IRQ sources and initialize IRQ callbacks */
2960 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2961 {
2962 	struct dc *dc = adev->dm.dc;
2963 	struct common_irq_params *c_irq_params;
2964 	struct dc_interrupt_params int_params = {0};
2965 	int r;
2966 	int i;
2967 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2968 
2969 	if (adev->asic_type >= CHIP_VEGA10)
2970 		client_id = SOC15_IH_CLIENTID_DCE;
2971 
2972 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2973 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2974 
2975 	/*
2976 	 * Actions of amdgpu_irq_add_id():
2977 	 * 1. Register a set() function with base driver.
2978 	 *    Base driver will call set() function to enable/disable an
2979 	 *    interrupt in DC hardware.
2980 	 * 2. Register amdgpu_dm_irq_handler().
2981 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2982 	 *    coming from DC hardware.
2983 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2984 	 *    for acknowledging and handling. */
2985 
2986 	/* Use VBLANK interrupt */
2987 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2988 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2989 		if (r) {
2990 			DRM_ERROR("Failed to add crtc irq id!\n");
2991 			return r;
2992 		}
2993 
2994 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2995 		int_params.irq_source =
2996 			dc_interrupt_to_irq_source(dc, i, 0);
2997 
2998 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2999 
3000 		c_irq_params->adev = adev;
3001 		c_irq_params->irq_src = int_params.irq_source;
3002 
3003 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3004 				dm_crtc_high_irq, c_irq_params);
3005 	}
3006 
3007 	/* Use VUPDATE interrupt */
3008 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3009 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3010 		if (r) {
3011 			DRM_ERROR("Failed to add vupdate irq id!\n");
3012 			return r;
3013 		}
3014 
3015 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3016 		int_params.irq_source =
3017 			dc_interrupt_to_irq_source(dc, i, 0);
3018 
3019 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3020 
3021 		c_irq_params->adev = adev;
3022 		c_irq_params->irq_src = int_params.irq_source;
3023 
3024 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3025 				dm_vupdate_high_irq, c_irq_params);
3026 	}
3027 
3028 	/* Use GRPH_PFLIP interrupt */
3029 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3030 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3031 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3032 		if (r) {
3033 			DRM_ERROR("Failed to add page flip irq id!\n");
3034 			return r;
3035 		}
3036 
3037 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3038 		int_params.irq_source =
3039 			dc_interrupt_to_irq_source(dc, i, 0);
3040 
3041 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3042 
3043 		c_irq_params->adev = adev;
3044 		c_irq_params->irq_src = int_params.irq_source;
3045 
3046 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3047 				dm_pflip_high_irq, c_irq_params);
3048 
3049 	}
3050 
3051 	/* HPD */
3052 	r = amdgpu_irq_add_id(adev, client_id,
3053 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3054 	if (r) {
3055 		DRM_ERROR("Failed to add hpd irq id!\n");
3056 		return r;
3057 	}
3058 
3059 	register_hpd_handlers(adev);
3060 
3061 	return 0;
3062 }
3063 
3064 #if defined(CONFIG_DRM_AMD_DC_DCN)
3065 /* Register IRQ sources and initialize IRQ callbacks */
3066 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3067 {
3068 	struct dc *dc = adev->dm.dc;
3069 	struct common_irq_params *c_irq_params;
3070 	struct dc_interrupt_params int_params = {0};
3071 	int r;
3072 	int i;
3073 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3074 	static const unsigned int vrtl_int_srcid[] = {
3075 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3076 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3077 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3078 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3079 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3080 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3081 	};
3082 #endif
3083 
3084 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3085 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3086 
3087 	/*
3088 	 * Actions of amdgpu_irq_add_id():
3089 	 * 1. Register a set() function with base driver.
3090 	 *    Base driver will call set() function to enable/disable an
3091 	 *    interrupt in DC hardware.
3092 	 * 2. Register amdgpu_dm_irq_handler().
3093 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3094 	 *    coming from DC hardware.
3095 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3096 	 *    for acknowledging and handling.
3097 	 */
3098 
3099 	/* Use VSTARTUP interrupt */
3100 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3101 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3102 			i++) {
3103 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3104 
3105 		if (r) {
3106 			DRM_ERROR("Failed to add crtc irq id!\n");
3107 			return r;
3108 		}
3109 
3110 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3111 		int_params.irq_source =
3112 			dc_interrupt_to_irq_source(dc, i, 0);
3113 
3114 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3115 
3116 		c_irq_params->adev = adev;
3117 		c_irq_params->irq_src = int_params.irq_source;
3118 
3119 		amdgpu_dm_irq_register_interrupt(
3120 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3121 	}
3122 
3123 	/* Use otg vertical line interrupt */
3124 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3125 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3126 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3127 				vrtl_int_srcid[i], &adev->vline0_irq);
3128 
3129 		if (r) {
3130 			DRM_ERROR("Failed to add vline0 irq id!\n");
3131 			return r;
3132 		}
3133 
3134 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3135 		int_params.irq_source =
3136 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3137 
3138 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3139 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3140 			break;
3141 		}
3142 
3143 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3144 					- DC_IRQ_SOURCE_DC1_VLINE0];
3145 
3146 		c_irq_params->adev = adev;
3147 		c_irq_params->irq_src = int_params.irq_source;
3148 
3149 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3150 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3151 	}
3152 #endif
3153 
3154 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3155 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3156 	 * to trigger at end of each vblank, regardless of state of the lock,
3157 	 * matching DCE behaviour.
3158 	 */
3159 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3160 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3161 	     i++) {
3162 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3163 
3164 		if (r) {
3165 			DRM_ERROR("Failed to add vupdate irq id!\n");
3166 			return r;
3167 		}
3168 
3169 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3170 		int_params.irq_source =
3171 			dc_interrupt_to_irq_source(dc, i, 0);
3172 
3173 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3174 
3175 		c_irq_params->adev = adev;
3176 		c_irq_params->irq_src = int_params.irq_source;
3177 
3178 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3179 				dm_vupdate_high_irq, c_irq_params);
3180 	}
3181 
3182 	/* Use GRPH_PFLIP interrupt */
3183 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3184 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3185 			i++) {
3186 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3187 		if (r) {
3188 			DRM_ERROR("Failed to add page flip irq id!\n");
3189 			return r;
3190 		}
3191 
3192 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3193 		int_params.irq_source =
3194 			dc_interrupt_to_irq_source(dc, i, 0);
3195 
3196 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3197 
3198 		c_irq_params->adev = adev;
3199 		c_irq_params->irq_src = int_params.irq_source;
3200 
3201 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3202 				dm_pflip_high_irq, c_irq_params);
3203 
3204 	}
3205 
3206 	/* HPD */
3207 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3208 			&adev->hpd_irq);
3209 	if (r) {
3210 		DRM_ERROR("Failed to add hpd irq id!\n");
3211 		return r;
3212 	}
3213 
3214 	register_hpd_handlers(adev);
3215 
3216 	return 0;
3217 }
3218 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3219 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3220 {
3221 	struct dc *dc = adev->dm.dc;
3222 	struct common_irq_params *c_irq_params;
3223 	struct dc_interrupt_params int_params = {0};
3224 	int r, i;
3225 
3226 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3227 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3228 
3229 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3230 			&adev->dmub_outbox_irq);
3231 	if (r) {
3232 		DRM_ERROR("Failed to add outbox irq id!\n");
3233 		return r;
3234 	}
3235 
3236 	if (dc->ctx->dmub_srv) {
3237 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3238 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3239 		int_params.irq_source =
3240 			dc_interrupt_to_irq_source(dc, i, 0);
3241 
3242 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3243 
3244 		c_irq_params->adev = adev;
3245 		c_irq_params->irq_src = int_params.irq_source;
3246 
3247 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3248 				dm_dmub_outbox1_low_irq, c_irq_params);
3249 	}
3250 
3251 	return 0;
3252 }
3253 #endif
3254 
3255 /*
3256  * Acquires the lock for the atomic state object and returns
3257  * the new atomic state.
3258  *
3259  * This should only be called during atomic check.
3260  */
3261 static int dm_atomic_get_state(struct drm_atomic_state *state,
3262 			       struct dm_atomic_state **dm_state)
3263 {
3264 	struct drm_device *dev = state->dev;
3265 	struct amdgpu_device *adev = drm_to_adev(dev);
3266 	struct amdgpu_display_manager *dm = &adev->dm;
3267 	struct drm_private_state *priv_state;
3268 
3269 	if (*dm_state)
3270 		return 0;
3271 
3272 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3273 	if (IS_ERR(priv_state))
3274 		return PTR_ERR(priv_state);
3275 
3276 	*dm_state = to_dm_atomic_state(priv_state);
3277 
3278 	return 0;
3279 }
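
/*
 * Minimal usage sketch from an atomic_check path (hypothetical caller, for
 * illustration only):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * On success, dm_state->context can be inspected or modified; the private
 * object lock acquired above serializes this against concurrent commits.
 */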
3280 
3281 static struct dm_atomic_state *
3282 dm_atomic_get_new_state(struct drm_atomic_state *state)
3283 {
3284 	struct drm_device *dev = state->dev;
3285 	struct amdgpu_device *adev = drm_to_adev(dev);
3286 	struct amdgpu_display_manager *dm = &adev->dm;
3287 	struct drm_private_obj *obj;
3288 	struct drm_private_state *new_obj_state;
3289 	int i;
3290 
3291 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3292 		if (obj->funcs == dm->atomic_obj.funcs)
3293 			return to_dm_atomic_state(new_obj_state);
3294 	}
3295 
3296 	return NULL;
3297 }
3298 
3299 static struct drm_private_state *
3300 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3301 {
3302 	struct dm_atomic_state *old_state, *new_state;
3303 
3304 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3305 	if (!new_state)
3306 		return NULL;
3307 
3308 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3309 
3310 	old_state = to_dm_atomic_state(obj->state);
3311 
3312 	if (old_state && old_state->context)
3313 		new_state->context = dc_copy_state(old_state->context);
3314 
3315 	if (!new_state->context) {
3316 		kfree(new_state);
3317 		return NULL;
3318 	}
3319 
3320 	return &new_state->base;
3321 }
3322 
3323 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3324 				    struct drm_private_state *state)
3325 {
3326 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3327 
3328 	if (dm_state && dm_state->context)
3329 		dc_release_state(dm_state->context);
3330 
3331 	kfree(dm_state);
3332 }
3333 
3334 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3335 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3336 	.atomic_destroy_state = dm_atomic_destroy_state,
3337 };
3338 
3339 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3340 {
3341 	struct dm_atomic_state *state;
3342 	int r;
3343 
3344 	adev->mode_info.mode_config_initialized = true;
3345 
3346 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3347 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3348 
3349 	adev_to_drm(adev)->mode_config.max_width = 16384;
3350 	adev_to_drm(adev)->mode_config.max_height = 16384;
3351 
3352 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3353 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3354 	/* indicates support for immediate flip */
3355 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3356 
3357 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3358 
3359 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3360 	if (!state)
3361 		return -ENOMEM;
3362 
3363 	state->context = dc_create_state(adev->dm.dc);
3364 	if (!state->context) {
3365 		kfree(state);
3366 		return -ENOMEM;
3367 	}
3368 
3369 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3370 
3371 	drm_atomic_private_obj_init(adev_to_drm(adev),
3372 				    &adev->dm.atomic_obj,
3373 				    &state->base,
3374 				    &dm_atomic_state_funcs);
3375 
3376 	r = amdgpu_display_modeset_create_props(adev);
3377 	if (r) {
3378 		dc_release_state(state->context);
3379 		kfree(state);
3380 		return r;
3381 	}
3382 
3383 	r = amdgpu_dm_audio_init(adev);
3384 	if (r) {
3385 		dc_release_state(state->context);
3386 		kfree(state);
3387 		return r;
3388 	}
3389 
3390 	return 0;
3391 }
3392 
3393 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3394 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3395 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3396 
3397 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3398 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3399 
3400 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3401 {
3402 #if defined(CONFIG_ACPI)
3403 	struct amdgpu_dm_backlight_caps caps;
3404 
3405 	memset(&caps, 0, sizeof(caps));
3406 
3407 	if (dm->backlight_caps.caps_valid)
3408 		return;
3409 
3410 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3411 	if (caps.caps_valid) {
3412 		dm->backlight_caps.caps_valid = true;
3413 		if (caps.aux_support)
3414 			return;
3415 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3416 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3417 	} else {
3418 		dm->backlight_caps.min_input_signal =
3419 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3420 		dm->backlight_caps.max_input_signal =
3421 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3422 	}
3423 #else
3424 	if (dm->backlight_caps.aux_support)
3425 		return;
3426 
3427 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3428 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3429 #endif
3430 }
3431 
3432 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3433 				unsigned int *min, unsigned int *max)
3434 {
3435 	if (!caps)
3436 		return 0;
3437 
3438 	if (caps->aux_support) {
3439 		// Firmware limits are in nits, DC API wants millinits.
3440 		*max = 1000 * caps->aux_max_input_signal;
3441 		*min = 1000 * caps->aux_min_input_signal;
3442 	} else {
3443 		// Firmware limits are 8-bit, PWM control is 16-bit.
3444 		*max = 0x101 * caps->max_input_signal;
3445 		*min = 0x101 * caps->min_input_signal;
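		/*
		 * For example, the default limits of 12 and 255 yield
		 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 0xFFFF,
		 * i.e. the full 16-bit PWM range.
		 */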
3446 	}
3447 	return 1;
3448 }
3449 
3450 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3451 					uint32_t brightness)
3452 {
3453 	unsigned int min, max;
3454 
3455 	if (!get_brightness_range(caps, &min, &max))
3456 		return brightness;
3457 
3458 	// Rescale 0..255 to min..max
3459 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3460 				       AMDGPU_MAX_BL_LEVEL);
3461 }
3462 
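/*
 * Assuming AMDGPU_MAX_BL_LEVEL is 255 (0xFF), the two converters are inverse
 * mappings at the endpoints: user level 0 maps to min, 255 maps to max, and
 * convert_brightness_to_user() below maps them straight back.
 */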
3463 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3464 				      uint32_t brightness)
3465 {
3466 	unsigned int min, max;
3467 
3468 	if (!get_brightness_range(caps, &min, &max))
3469 		return brightness;
3470 
3471 	if (brightness < min)
3472 		return 0;
3473 	// Rescale min..max to 0..255
3474 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3475 				 max - min);
3476 }
3477 
3478 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3479 					 u32 user_brightness)
3480 {
3481 	struct amdgpu_dm_backlight_caps caps;
3482 	struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3483 	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3484 	bool rc = true; /* stays true if there are no eDP links to update */
3485 	int i;
3486 
3487 	amdgpu_dm_update_backlight_caps(dm);
3488 	caps = dm->backlight_caps;
3489 
3490 	for (i = 0; i < dm->num_of_edps; i++) {
3491 		dm->brightness[i] = user_brightness;
3492 		brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3493 		link[i] = (struct dc_link *)dm->backlight_link[i];
3494 	}
3495 
3496 	/* Change brightness based on AUX property */
3497 	if (caps.aux_support) {
3498 		for (i = 0; i < dm->num_of_edps; i++) {
3499 			rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3500 				AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3501 			if (!rc) {
3502 				DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3503 				break;
3504 			}
3505 		}
3506 	} else {
3507 		for (i = 0; i < dm->num_of_edps; i++) {
3508 			rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3509 			if (!rc) {
3510 				DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
3511 				break;
3512 			}
3513 		}
3514 	}
3515 
3516 	return rc ? 0 : 1;
3517 }
3518 
3519 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3520 {
3521 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3522 
3523 	amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3524 
3525 	return 0;
3526 }
3527 
3528 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3529 {
3530 	struct amdgpu_dm_backlight_caps caps;
3531 
3532 	amdgpu_dm_update_backlight_caps(dm);
3533 	caps = dm->backlight_caps;
3534 
3535 	if (caps.aux_support) {
3536 		struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3537 		u32 avg, peak;
3538 		bool rc;
3539 
3540 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3541 		if (!rc)
3542 			return dm->brightness[0];
3543 		return convert_brightness_to_user(&caps, avg);
3544 	} else {
3545 		int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3546 
3547 		if (ret == DC_ERROR_UNEXPECTED)
3548 			return dm->brightness[0];
3549 		return convert_brightness_to_user(&caps, ret);
3550 	}
3551 }
3552 
3553 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3554 {
3555 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3556 
3557 	return amdgpu_dm_backlight_get_level(dm);
3558 }
3559 
3560 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3561 	.options = BL_CORE_SUSPENDRESUME,
3562 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3563 	.update_status	= amdgpu_dm_backlight_update_status,
3564 };
3565 
3566 static void
3567 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3568 {
3569 	char bl_name[16];
3570 	struct backlight_properties props = { 0 };
3571 	int i;
3572 
3573 	amdgpu_dm_update_backlight_caps(dm);
3574 	for (i = 0; i < dm->num_of_edps; i++)
3575 		dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3576 
3577 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3578 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3579 	props.type = BACKLIGHT_RAW;
3580 
3581 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3582 		 adev_to_drm(dm->adev)->primary->index);
3583 
3584 	dm->backlight_dev = backlight_device_register(bl_name,
3585 						      adev_to_drm(dm->adev)->dev,
3586 						      dm,
3587 						      &amdgpu_dm_backlight_ops,
3588 						      &props);
3589 
3590 	if (IS_ERR(dm->backlight_dev))
3591 		DRM_ERROR("DM: Backlight registration failed!\n");
3592 	else
3593 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3594 }
3595 
3596 #endif
3597 
3598 static int initialize_plane(struct amdgpu_display_manager *dm,
3599 			    struct amdgpu_mode_info *mode_info, int plane_id,
3600 			    enum drm_plane_type plane_type,
3601 			    const struct dc_plane_cap *plane_cap)
3602 {
3603 	struct drm_plane *plane;
3604 	unsigned long possible_crtcs;
3605 	int ret = 0;
3606 
3607 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3608 	if (!plane) {
3609 		DRM_ERROR("KMS: Failed to allocate plane\n");
3610 		return -ENOMEM;
3611 	}
3612 	plane->type = plane_type;
3613 
3614 	/*
3615 	 * HACK: IGT tests expect that the primary plane for a CRTC
3616 	 * can only have one possible CRTC. Only expose support for
3617 	 * any CRTC to planes that won't be used as the primary plane
3618 	 * of a CRTC - i.e. overlay or underlay planes.
3619 	 */
3620 	possible_crtcs = 1 << plane_id;
3621 	if (plane_id >= dm->dc->caps.max_streams)
3622 		possible_crtcs = 0xff;
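	/*
	 * For example, plane_id 0 yields possible_crtcs = 0x1 (CRTC 0 only),
	 * while overlay planes past max_streams get 0xff (any CRTC).
	 */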
3623 
3624 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3625 
3626 	if (ret) {
3627 		DRM_ERROR("KMS: Failed to initialize plane\n");
3628 		kfree(plane);
3629 		return ret;
3630 	}
3631 
3632 	if (mode_info)
3633 		mode_info->planes[plane_id] = plane;
3634 
3635 	return ret;
3636 }
3637 
3639 static void register_backlight_device(struct amdgpu_display_manager *dm,
3640 				      struct dc_link *link)
3641 {
3642 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3643 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3644 
3645 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3646 	    link->type != dc_connection_none) {
3647 		/*
3648 		 * Even if registration fails, we should continue with
3649 		 * DM initialization because not having backlight control
3650 		 * is better than a black screen.
3651 		 */
3652 		if (!dm->backlight_dev)
3653 			amdgpu_dm_register_backlight_device(dm);
3654 
3655 		if (dm->backlight_dev) {
3656 			dm->backlight_link[dm->num_of_edps] = link;
3657 			dm->num_of_edps++;
3658 		}
3659 	}
3660 #endif
3661 }
3662 
3664 /*
3665  * In this architecture, the association
3666  * connector -> encoder -> crtc
3667  * is not really required. The crtc and connector will hold the
3668  * display_index as an abstraction to use with the DAL component.
3669  *
3670  * Returns 0 on success
3671  */
3672 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3673 {
3674 	struct amdgpu_display_manager *dm = &adev->dm;
3675 	int32_t i;
3676 	struct amdgpu_dm_connector *aconnector = NULL;
3677 	struct amdgpu_encoder *aencoder = NULL;
3678 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3679 	uint32_t link_cnt;
3680 	int32_t primary_planes;
3681 	enum dc_connection_type new_connection_type = dc_connection_none;
3682 	const struct dc_plane_cap *plane;
3683 
3684 	dm->display_indexes_num = dm->dc->caps.max_streams;
3685 	/* Update the actual number of CRTCs in use */
3686 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3687 
3688 	link_cnt = dm->dc->caps.max_links;
3689 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3690 		DRM_ERROR("DM: Failed to initialize mode config\n");
3691 		return -EINVAL;
3692 	}
3693 
3694 	/* There is one primary plane per CRTC */
3695 	primary_planes = dm->dc->caps.max_streams;
3696 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3697 
3698 	/*
3699 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3700 	 * Order is reversed to match iteration order in atomic check.
3701 	 */
3702 	for (i = (primary_planes - 1); i >= 0; i--) {
3703 		plane = &dm->dc->caps.planes[i];
3704 
3705 		if (initialize_plane(dm, mode_info, i,
3706 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3707 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3708 			goto fail;
3709 		}
3710 	}
3711 
3712 	/*
3713 	 * Initialize overlay planes, index starting after primary planes.
3714 	 * These planes have a higher DRM index than the primary planes since
3715 	 * they should be considered as having a higher z-order.
3716 	 * Order is reversed to match iteration order in atomic check.
3717 	 *
3718 	 * Only support DCN for now, and only expose one so we don't encourage
3719 	 * userspace to use up all the pipes.
3720 	 */
3721 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3722 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3723 
3724 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3725 			continue;
3726 
3727 		if (!plane->blends_with_above || !plane->blends_with_below)
3728 			continue;
3729 
3730 		if (!plane->pixel_format_support.argb8888)
3731 			continue;
3732 
3733 		if (initialize_plane(dm, NULL, primary_planes + i,
3734 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3735 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3736 			goto fail;
3737 		}
3738 
3739 		/* Only create one overlay plane. */
3740 		break;
3741 	}
3742 
3743 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3744 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3745 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3746 			goto fail;
3747 		}
3748 
3749 #if defined(CONFIG_DRM_AMD_DC_DCN)
3750 	/* Use Outbox interrupt */
3751 	switch (adev->asic_type) {
3752 	case CHIP_SIENNA_CICHLID:
3753 	case CHIP_NAVY_FLOUNDER:
3754 	case CHIP_RENOIR:
3755 		if (register_outbox_irq_handlers(dm->adev)) {
3756 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3757 			goto fail;
3758 		}
3759 		break;
3760 	default:
3761 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3762 	}
3763 #endif
3764 
3765 	/* loops over all connectors on the board */
3766 	for (i = 0; i < link_cnt; i++) {
3767 		struct dc_link *link = NULL;
3768 
3769 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3770 			DRM_ERROR(
3771 				"KMS: Cannot support more than %d display indexes\n",
3772 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3773 			continue;
3774 		}
3775 
3776 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3777 		if (!aconnector)
3778 			goto fail;
3779 
3780 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3781 		if (!aencoder)
3782 			goto fail;
3783 
3784 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3785 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3786 			goto fail;
3787 		}
3788 
3789 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3790 			DRM_ERROR("KMS: Failed to initialize connector\n");
3791 			goto fail;
3792 		}
3793 
3794 		link = dc_get_link_at_index(dm->dc, i);
3795 
3796 		if (!dc_link_detect_sink(link, &new_connection_type))
3797 			DRM_ERROR("KMS: Failed to detect connector\n");
3798 
3799 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3800 			emulated_link_detect(link);
3801 			amdgpu_dm_update_connector_after_detect(aconnector);
3802 
3803 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3804 			amdgpu_dm_update_connector_after_detect(aconnector);
3805 			register_backlight_device(dm, link);
3806 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3807 				amdgpu_dm_set_psr_caps(link);
3808 		}
3809 
3811 	}
3812 
3813 	/* Software is initialized. Now we can register interrupt handlers. */
3814 	switch (adev->asic_type) {
3815 #if defined(CONFIG_DRM_AMD_DC_SI)
3816 	case CHIP_TAHITI:
3817 	case CHIP_PITCAIRN:
3818 	case CHIP_VERDE:
3819 	case CHIP_OLAND:
3820 		if (dce60_register_irq_handlers(dm->adev)) {
3821 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3822 			goto fail;
3823 		}
3824 		break;
3825 #endif
3826 	case CHIP_BONAIRE:
3827 	case CHIP_HAWAII:
3828 	case CHIP_KAVERI:
3829 	case CHIP_KABINI:
3830 	case CHIP_MULLINS:
3831 	case CHIP_TONGA:
3832 	case CHIP_FIJI:
3833 	case CHIP_CARRIZO:
3834 	case CHIP_STONEY:
3835 	case CHIP_POLARIS11:
3836 	case CHIP_POLARIS10:
3837 	case CHIP_POLARIS12:
3838 	case CHIP_VEGAM:
3839 	case CHIP_VEGA10:
3840 	case CHIP_VEGA12:
3841 	case CHIP_VEGA20:
3842 		if (dce110_register_irq_handlers(dm->adev)) {
3843 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3844 			goto fail;
3845 		}
3846 		break;
3847 #if defined(CONFIG_DRM_AMD_DC_DCN)
3848 	case CHIP_RAVEN:
3849 	case CHIP_NAVI12:
3850 	case CHIP_NAVI10:
3851 	case CHIP_NAVI14:
3852 	case CHIP_RENOIR:
3853 	case CHIP_SIENNA_CICHLID:
3854 	case CHIP_NAVY_FLOUNDER:
3855 	case CHIP_DIMGREY_CAVEFISH:
3856 	case CHIP_BEIGE_GOBY:
3857 	case CHIP_VANGOGH:
3858 		if (dcn10_register_irq_handlers(dm->adev)) {
3859 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3860 			goto fail;
3861 		}
3862 		break;
3863 #endif
3864 	default:
3865 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3866 		goto fail;
3867 	}
3868 
3869 	return 0;
3870 fail:
3871 	kfree(aencoder);
3872 	kfree(aconnector);
3873 
3874 	return -EINVAL;
3875 }
3876 
3877 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3878 {
3879 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3880 	return;
3881 }
3882 
3883 /******************************************************************************
3884  * amdgpu_display_funcs functions
3885  *****************************************************************************/
3886 
3887 /*
3888  * dm_bandwidth_update - program display watermarks
3889  *
3890  * @adev: amdgpu_device pointer
3891  *
3892  * Calculate and program the display watermarks and line buffer allocation.
3893  */
3894 static void dm_bandwidth_update(struct amdgpu_device *adev)
3895 {
3896 	/* TODO: implement later */
3897 }
3898 
3899 static const struct amdgpu_display_funcs dm_display_funcs = {
3900 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3901 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3902 	.backlight_set_level = NULL, /* never called for DC */
3903 	.backlight_get_level = NULL, /* never called for DC */
3904 	.hpd_sense = NULL,/* called unconditionally */
3905 	.hpd_set_polarity = NULL, /* called unconditionally */
3906 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3907 	.page_flip_get_scanoutpos =
3908 		dm_crtc_get_scanoutpos,/* called unconditionally */
3909 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3910 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3911 };
3912 
3913 #if defined(CONFIG_DEBUG_KERNEL_DC)
3914 
3915 static ssize_t s3_debug_store(struct device *device,
3916 			      struct device_attribute *attr,
3917 			      const char *buf,
3918 			      size_t count)
3919 {
3920 	int ret;
3921 	int s3_state;
3922 	struct drm_device *drm_dev = dev_get_drvdata(device);
3923 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3924 
3925 	ret = kstrtoint(buf, 0, &s3_state);
3926 
3927 	if (ret == 0) {
3928 		if (s3_state) {
3929 			dm_resume(adev);
3930 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3931 		} else
3932 			dm_suspend(adev);
3933 	}
3934 
3935 	return ret == 0 ? count : ret;
3936 }
3937 
3938 DEVICE_ATTR_WO(s3_debug);
3939 
3940 #endif
3941 
3942 static int dm_early_init(void *handle)
3943 {
3944 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3945 
3946 	switch (adev->asic_type) {
3947 #if defined(CONFIG_DRM_AMD_DC_SI)
3948 	case CHIP_TAHITI:
3949 	case CHIP_PITCAIRN:
3950 	case CHIP_VERDE:
3951 		adev->mode_info.num_crtc = 6;
3952 		adev->mode_info.num_hpd = 6;
3953 		adev->mode_info.num_dig = 6;
3954 		break;
3955 	case CHIP_OLAND:
3956 		adev->mode_info.num_crtc = 2;
3957 		adev->mode_info.num_hpd = 2;
3958 		adev->mode_info.num_dig = 2;
3959 		break;
3960 #endif
3961 	case CHIP_BONAIRE:
3962 	case CHIP_HAWAII:
3963 		adev->mode_info.num_crtc = 6;
3964 		adev->mode_info.num_hpd = 6;
3965 		adev->mode_info.num_dig = 6;
3966 		break;
3967 	case CHIP_KAVERI:
3968 		adev->mode_info.num_crtc = 4;
3969 		adev->mode_info.num_hpd = 6;
3970 		adev->mode_info.num_dig = 7;
3971 		break;
3972 	case CHIP_KABINI:
3973 	case CHIP_MULLINS:
3974 		adev->mode_info.num_crtc = 2;
3975 		adev->mode_info.num_hpd = 6;
3976 		adev->mode_info.num_dig = 6;
3977 		break;
3978 	case CHIP_FIJI:
3979 	case CHIP_TONGA:
3980 		adev->mode_info.num_crtc = 6;
3981 		adev->mode_info.num_hpd = 6;
3982 		adev->mode_info.num_dig = 7;
3983 		break;
3984 	case CHIP_CARRIZO:
3985 		adev->mode_info.num_crtc = 3;
3986 		adev->mode_info.num_hpd = 6;
3987 		adev->mode_info.num_dig = 9;
3988 		break;
3989 	case CHIP_STONEY:
3990 		adev->mode_info.num_crtc = 2;
3991 		adev->mode_info.num_hpd = 6;
3992 		adev->mode_info.num_dig = 9;
3993 		break;
3994 	case CHIP_POLARIS11:
3995 	case CHIP_POLARIS12:
3996 		adev->mode_info.num_crtc = 5;
3997 		adev->mode_info.num_hpd = 5;
3998 		adev->mode_info.num_dig = 5;
3999 		break;
4000 	case CHIP_POLARIS10:
4001 	case CHIP_VEGAM:
4002 		adev->mode_info.num_crtc = 6;
4003 		adev->mode_info.num_hpd = 6;
4004 		adev->mode_info.num_dig = 6;
4005 		break;
4006 	case CHIP_VEGA10:
4007 	case CHIP_VEGA12:
4008 	case CHIP_VEGA20:
4009 		adev->mode_info.num_crtc = 6;
4010 		adev->mode_info.num_hpd = 6;
4011 		adev->mode_info.num_dig = 6;
4012 		break;
4013 #if defined(CONFIG_DRM_AMD_DC_DCN)
4014 	case CHIP_RAVEN:
4015 	case CHIP_RENOIR:
4016 	case CHIP_VANGOGH:
4017 		adev->mode_info.num_crtc = 4;
4018 		adev->mode_info.num_hpd = 4;
4019 		adev->mode_info.num_dig = 4;
4020 		break;
4021 	case CHIP_NAVI10:
4022 	case CHIP_NAVI12:
4023 	case CHIP_SIENNA_CICHLID:
4024 	case CHIP_NAVY_FLOUNDER:
4025 		adev->mode_info.num_crtc = 6;
4026 		adev->mode_info.num_hpd = 6;
4027 		adev->mode_info.num_dig = 6;
4028 		break;
4029 	case CHIP_NAVI14:
4030 	case CHIP_DIMGREY_CAVEFISH:
4031 		adev->mode_info.num_crtc = 5;
4032 		adev->mode_info.num_hpd = 5;
4033 		adev->mode_info.num_dig = 5;
4034 		break;
4035 	case CHIP_BEIGE_GOBY:
4036 		adev->mode_info.num_crtc = 2;
4037 		adev->mode_info.num_hpd = 2;
4038 		adev->mode_info.num_dig = 2;
4039 		break;
4040 #endif
4041 	default:
4042 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4043 		return -EINVAL;
4044 	}
4045 
4046 	amdgpu_dm_set_irq_funcs(adev);
4047 
4048 	if (adev->mode_info.funcs == NULL)
4049 		adev->mode_info.funcs = &dm_display_funcs;
4050 
4051 	/*
4052 	 * Note: Do NOT change adev->audio_endpt_rreg and
4053 	 * adev->audio_endpt_wreg because they are initialised in
4054 	 * amdgpu_device_init()
4055 	 */
4056 #if defined(CONFIG_DEBUG_KERNEL_DC)
4057 	device_create_file(
4058 		adev_to_drm(adev)->dev,
4059 		&dev_attr_s3_debug);
4060 #endif
4061 
4062 	return 0;
4063 }
4064 
4065 static bool modeset_required(struct drm_crtc_state *crtc_state,
4066 			     struct dc_stream_state *new_stream,
4067 			     struct dc_stream_state *old_stream)
4068 {
4069 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4070 }
4071 
4072 static bool modereset_required(struct drm_crtc_state *crtc_state)
4073 {
4074 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4075 }
4076 
4077 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4078 {
4079 	drm_encoder_cleanup(encoder);
4080 	kfree(encoder);
4081 }
4082 
4083 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4084 	.destroy = amdgpu_dm_encoder_destroy,
4085 };
4086 
4088 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4089 					 struct drm_framebuffer *fb,
4090 					 int *min_downscale, int *max_upscale)
4091 {
4092 	struct amdgpu_device *adev = drm_to_adev(dev);
4093 	struct dc *dc = adev->dm.dc;
4094 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4095 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4096 
4097 	switch (fb->format->format) {
4098 	case DRM_FORMAT_P010:
4099 	case DRM_FORMAT_NV12:
4100 	case DRM_FORMAT_NV21:
4101 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4102 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4103 		break;
4104 
4105 	case DRM_FORMAT_XRGB16161616F:
4106 	case DRM_FORMAT_ARGB16161616F:
4107 	case DRM_FORMAT_XBGR16161616F:
4108 	case DRM_FORMAT_ABGR16161616F:
4109 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4110 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4111 		break;
4112 
4113 	default:
4114 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4115 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4116 		break;
4117 	}
4118 
4119 	/*
4120 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use
4121 	 * a scaling factor of 1.0, which is represented as 1000 units.
4122 	 */
4123 	if (*max_upscale == 1)
4124 		*max_upscale = 1000;
4125 
4126 	if (*min_downscale == 1)
4127 		*min_downscale = 1000;
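	/*
	 * For example, a max_upscale of 16000 allows a 16x upscale, while a
	 * min_downscale of 250 allows shrinking the source to 0.25x.
	 */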
4128 }
4129 
4131 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4132 				struct dc_scaling_info *scaling_info)
4133 {
4134 	int scale_w, scale_h, min_downscale, max_upscale;
4135 
4136 	memset(scaling_info, 0, sizeof(*scaling_info));
4137 
4138 	/* Source is in 16.16 fixed point; we ignore the fractional part for now... */
4139 	scaling_info->src_rect.x = state->src_x >> 16;
4140 	scaling_info->src_rect.y = state->src_y >> 16;
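	/*
	 * For example, a src_x of (5 << 16) == 327680 becomes 5 here; any
	 * fractional sub-pixel offset is dropped.
	 */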
4141 
4142 	/*
4143 	 * For reasons we don't (yet) fully understand a non-zero
4144 	 * src_y coordinate into an NV12 buffer can cause a
4145 	 * system hang. To avoid hangs (and maybe be overly cautious)
4146 	 * let's reject both non-zero src_x and src_y.
4147 	 *
4148 	 * We currently know of only one use-case to reproduce a
4149 	 * scenario with non-zero src_x and src_y for NV12, which
4150 	 * is to gesture the YouTube Android app into full screen
4151 	 * on ChromeOS.
4152 	 */
4153 	if (state->fb &&
4154 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4155 	    (scaling_info->src_rect.x != 0 ||
4156 	     scaling_info->src_rect.y != 0))
4157 		return -EINVAL;
4158 
4159 	scaling_info->src_rect.width = state->src_w >> 16;
4160 	if (scaling_info->src_rect.width == 0)
4161 		return -EINVAL;
4162 
4163 	scaling_info->src_rect.height = state->src_h >> 16;
4164 	if (scaling_info->src_rect.height == 0)
4165 		return -EINVAL;
4166 
4167 	scaling_info->dst_rect.x = state->crtc_x;
4168 	scaling_info->dst_rect.y = state->crtc_y;
4169 
4170 	if (state->crtc_w == 0)
4171 		return -EINVAL;
4172 
4173 	scaling_info->dst_rect.width = state->crtc_w;
4174 
4175 	if (state->crtc_h == 0)
4176 		return -EINVAL;
4177 
4178 	scaling_info->dst_rect.height = state->crtc_h;
4179 
4180 	/* DRM doesn't specify clipping on destination output. */
4181 	scaling_info->clip_rect = scaling_info->dst_rect;
4182 
4183 	/* Validate scaling per-format with DC plane caps */
4184 	if (state->plane && state->plane->dev && state->fb) {
4185 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4186 					     &min_downscale, &max_upscale);
4187 	} else {
4188 		min_downscale = 250;
4189 		max_upscale = 16000;
4190 	}
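	/*
	 * Ratios are in thousandths: e.g. scaling a 1920-wide source to a
	 * 3840-wide destination gives scale_w = 2000 (2.0x), which must lie
	 * within [min_downscale, max_upscale].
	 */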
4191 
4192 	scale_w = scaling_info->dst_rect.width * 1000 /
4193 		  scaling_info->src_rect.width;
4194 
4195 	if (scale_w < min_downscale || scale_w > max_upscale)
4196 		return -EINVAL;
4197 
4198 	scale_h = scaling_info->dst_rect.height * 1000 /
4199 		  scaling_info->src_rect.height;
4200 
4201 	if (scale_h < min_downscale || scale_h > max_upscale)
4202 		return -EINVAL;
4203 
4204 	/*
4205 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4206 	 * assume reasonable defaults based on the format.
4207 	 */
4208 
4209 	return 0;
4210 }
4211 
4212 static void
4213 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4214 				 uint64_t tiling_flags)
4215 {
4216 	/* Fill GFX8 params */
4217 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4218 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4219 
4220 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4221 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4222 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4223 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4224 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4225 
4226 		/* XXX fix me for VI */
4227 		tiling_info->gfx8.num_banks = num_banks;
4228 		tiling_info->gfx8.array_mode =
4229 				DC_ARRAY_2D_TILED_THIN1;
4230 		tiling_info->gfx8.tile_split = tile_split;
4231 		tiling_info->gfx8.bank_width = bankw;
4232 		tiling_info->gfx8.bank_height = bankh;
4233 		tiling_info->gfx8.tile_aspect = mtaspect;
4234 		tiling_info->gfx8.tile_mode =
4235 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4236 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4237 			== DC_ARRAY_1D_TILED_THIN1) {
4238 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4239 	}
4240 
4241 	tiling_info->gfx8.pipe_config =
4242 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4243 }
4244 
4245 static void
4246 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4247 				  union dc_tiling_info *tiling_info)
4248 {
4249 	tiling_info->gfx9.num_pipes =
4250 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4251 	tiling_info->gfx9.num_banks =
4252 		adev->gfx.config.gb_addr_config_fields.num_banks;
4253 	tiling_info->gfx9.pipe_interleave =
4254 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4255 	tiling_info->gfx9.num_shader_engines =
4256 		adev->gfx.config.gb_addr_config_fields.num_se;
4257 	tiling_info->gfx9.max_compressed_frags =
4258 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4259 	tiling_info->gfx9.num_rb_per_se =
4260 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4261 	tiling_info->gfx9.shaderEnable = 1;
4262 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4263 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4264 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4265 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4266 	    adev->asic_type == CHIP_VANGOGH)
4267 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4268 }
4269 
4270 static int
4271 validate_dcc(struct amdgpu_device *adev,
4272 	     const enum surface_pixel_format format,
4273 	     const enum dc_rotation_angle rotation,
4274 	     const union dc_tiling_info *tiling_info,
4275 	     const struct dc_plane_dcc_param *dcc,
4276 	     const struct dc_plane_address *address,
4277 	     const struct plane_size *plane_size)
4278 {
4279 	struct dc *dc = adev->dm.dc;
4280 	struct dc_dcc_surface_param input;
4281 	struct dc_surface_dcc_cap output;
4282 
4283 	memset(&input, 0, sizeof(input));
4284 	memset(&output, 0, sizeof(output));
4285 
4286 	if (!dcc->enable)
4287 		return 0;
4288 
4289 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4290 	    !dc->cap_funcs.get_dcc_compression_cap)
4291 		return -EINVAL;
4292 
4293 	input.format = format;
4294 	input.surface_size.width = plane_size->surface_size.width;
4295 	input.surface_size.height = plane_size->surface_size.height;
4296 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4297 
4298 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4299 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4300 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4301 		input.scan = SCAN_DIRECTION_VERTICAL;
4302 
4303 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4304 		return -EINVAL;
4305 
4306 	if (!output.capable)
4307 		return -EINVAL;
4308 
4309 	if (dcc->independent_64b_blks == 0 &&
4310 	    output.grph.rgb.independent_64b_blks != 0)
4311 		return -EINVAL;
4312 
4313 	return 0;
4314 }
4315 
4316 static bool
4317 modifier_has_dcc(uint64_t modifier)
4318 {
4319 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4320 }
4321 
4322 static unsigned
4323 modifier_gfx9_swizzle_mode(uint64_t modifier)
4324 {
4325 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4326 		return 0;
4327 
4328 	return AMD_FMT_MOD_GET(TILE, modifier);
4329 }
4330 
4331 static const struct drm_format_info *
4332 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4333 {
4334 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4335 }
4336 
4337 static void
4338 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4339 				    union dc_tiling_info *tiling_info,
4340 				    uint64_t modifier)
4341 {
4342 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4343 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4344 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4345 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4346 
4347 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4348 
4349 	if (!IS_AMD_FMT_MOD(modifier))
4350 		return;
4351 
4352 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4353 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
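	/*
	 * For example, a modifier with PIPE_XOR_BITS == 3 yields num_pipes ==
	 * 8; pipes_log2 is capped at 4, so any remaining XOR bits spill into
	 * num_shader_engines instead.
	 */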
4354 
4355 	if (adev->family >= AMDGPU_FAMILY_NV) {
4356 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4357 	} else {
4358 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4359 
4360 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4361 	}
4362 }
4363 
4364 enum dm_micro_swizzle {
4365 	MICRO_SWIZZLE_Z = 0,
4366 	MICRO_SWIZZLE_S = 1,
4367 	MICRO_SWIZZLE_D = 2,
4368 	MICRO_SWIZZLE_R = 3
4369 };
4370 
4371 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4372 					  uint32_t format,
4373 					  uint64_t modifier)
4374 {
4375 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4376 	const struct drm_format_info *info = drm_format_info(format);
4377 	int i;
4378 
4379 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4380 
4381 	if (!info)
4382 		return false;
4383 
4384 	/*
4385 	 * We always have to allow these modifiers:
4386 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4387 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4388 	 */
4389 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4390 	    modifier == DRM_FORMAT_MOD_INVALID) {
4391 		return true;
4392 	}
4393 
4394 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4395 	for (i = 0; i < plane->modifier_count; i++) {
4396 		if (modifier == plane->modifiers[i])
4397 			break;
4398 	}
4399 	if (i == plane->modifier_count)
4400 		return false;
4401 
4402 	/*
4403 	 * For D swizzle the canonical modifier depends on the bpp, so check
4404 	 * it here.
4405 	 */
4406 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4407 	    adev->family >= AMDGPU_FAMILY_NV) {
4408 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4409 			return false;
4410 	}
4411 
4412 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4413 	    info->cpp[0] < 8)
4414 		return false;
4415 
4416 	if (modifier_has_dcc(modifier)) {
4417 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4418 		if (info->cpp[0] != 4)
4419 			return false;
4420 		/* We support multi-planar formats, but not when combined with
4421 		 * additional DCC metadata planes. */
4422 		if (info->num_planes > 1)
4423 			return false;
4424 	}
4425 
4426 	return true;
4427 }
4428 
4429 static void
4430 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4431 {
4432 	if (!*mods)
4433 		return;
4434 
4435 	if (*cap - *size < 1) {
4436 		uint64_t new_cap = *cap * 2;
4437 		uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
4438 
4439 		if (!new_mods) {
4440 			kfree(*mods);
4441 			*mods = NULL;
4442 			return;
4443 		}
4444 
4445 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4446 		kfree(*mods);
4447 		*mods = new_mods;
4448 		*cap = new_cap;
4449 	}
4450 
4451 	(*mods)[*size] = mod;
4452 	*size += 1;
4453 }
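
/*
 * Note that add_modifier() grows the array geometrically: the initial
 * capacity of 128 (see get_plane_modifiers()) doubles each time it fills, so
 * appends are amortized O(1).
 */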
4454 
4455 static void
4456 add_gfx9_modifiers(const struct amdgpu_device *adev,
4457 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4458 {
4459 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4460 	int pipe_xor_bits = min(8, pipes +
4461 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4462 	int bank_xor_bits = min(8 - pipe_xor_bits,
4463 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4464 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4465 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
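	/*
	 * For example (hypothetical config): 4 pipes and 1 SE give
	 * pipe_xor_bits = min(8, 2 + 0) = 2, leaving up to 8 - 2 = 6 bits of
	 * bank XOR.
	 */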
4466 
4468 	if (adev->family == AMDGPU_FAMILY_RV) {
4469 		/* Raven2 and later */
4470 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4471 
4472 		/*
4473 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4474 		 * doesn't support _D on DCN
4475 		 */
4476 
4477 		if (has_constant_encode) {
4478 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4479 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4480 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4481 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4482 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4483 				    AMD_FMT_MOD_SET(DCC, 1) |
4484 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4485 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4486 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4487 		}
4488 
4489 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4490 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4491 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4492 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4493 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4494 			    AMD_FMT_MOD_SET(DCC, 1) |
4495 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4496 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4497 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4498 
4499 		if (has_constant_encode) {
4500 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4501 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4502 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4503 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4504 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4505 				    AMD_FMT_MOD_SET(DCC, 1) |
4506 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4507 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4508 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4510 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4511 				    AMD_FMT_MOD_SET(RB, rb) |
4512 				    AMD_FMT_MOD_SET(PIPE, pipes));
4513 		}
4514 
4515 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4516 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4517 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4518 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4519 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4520 			    AMD_FMT_MOD_SET(DCC, 1) |
4521 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4522 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4523 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4524 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4525 			    AMD_FMT_MOD_SET(RB, rb) |
4526 			    AMD_FMT_MOD_SET(PIPE, pipes));
4527 	}
4528 
4529 	/*
4530 	 * Only supported for 64bpp on Raven, will be filtered on format in
4531 	 * dm_plane_format_mod_supported.
4532 	 */
4533 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4534 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4535 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4536 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4537 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4538 
4539 	if (adev->family == AMDGPU_FAMILY_RV) {
4540 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4541 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4542 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4543 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4544 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4545 	}
4546 
4547 	/*
4548 	 * Only supported for 64bpp on Raven, will be filtered on format in
4549 	 * dm_plane_format_mod_supported.
4550 	 */
4551 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4552 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4553 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4554 
4555 	if (adev->family == AMDGPU_FAMILY_RV) {
4556 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4557 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4558 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4559 	}
4560 }
4561 
4562 static void
4563 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4564 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4565 {
4566 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4567 
4568 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4569 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4570 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4571 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4572 		    AMD_FMT_MOD_SET(DCC, 1) |
4573 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4574 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4575 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4576 
4577 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4578 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4579 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4580 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4581 		    AMD_FMT_MOD_SET(DCC, 1) |
4582 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4583 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4584 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4585 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4586 
4587 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4588 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4589 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4590 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4591 
4592 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4593 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4594 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4595 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4596 
4598 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4599 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4600 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4601 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4602 
4603 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4604 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4605 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4606 }
4607 
4608 static void
4609 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4610 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4611 {
4612 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4613 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4614 
4615 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4616 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4617 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4618 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4619 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4620 		    AMD_FMT_MOD_SET(DCC, 1) |
4621 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4622 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4623 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4624 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4625 
4626 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4627 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4628 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4629 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4630 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4631 		    AMD_FMT_MOD_SET(DCC, 1) |
4632 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4633 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4634 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4635 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4636 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4637 
4638 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4639 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4640 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4641 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4642 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4643 
4644 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4645 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4646 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4647 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4648 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4649 
4650 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4651 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4652 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4653 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4654 
4655 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4656 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4657 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4658 }
4659 
4660 static int
4661 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4662 {
4663 	uint64_t size = 0, capacity = 128;
4664 	*mods = NULL;
4665 
4666 	/* We have not hooked up any pre-GFX9 modifiers. */
4667 	if (adev->family < AMDGPU_FAMILY_AI)
4668 		return 0;
4669 
4670 	*mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
4671 
4672 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4673 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4674 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4675 		return *mods ? 0 : -ENOMEM;
4676 	}
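	/*
	 * So for cursor planes the list is just { LINEAR, INVALID }, i.e.
	 * only linear buffers are accepted.
	 */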
4677 
4678 	switch (adev->family) {
4679 	case AMDGPU_FAMILY_AI:
4680 	case AMDGPU_FAMILY_RV:
4681 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4682 		break;
4683 	case AMDGPU_FAMILY_NV:
4684 	case AMDGPU_FAMILY_VGH:
4685 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4686 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4687 		else
4688 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4689 		break;
4690 	}
4691 
4692 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4693 
4694 	/* INVALID marks the end of the list. */
4695 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4696 
4697 	if (!*mods)
4698 		return -ENOMEM;
4699 
4700 	return 0;
4701 }
4702 
4703 static int
4704 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4705 					  const struct amdgpu_framebuffer *afb,
4706 					  const enum surface_pixel_format format,
4707 					  const enum dc_rotation_angle rotation,
4708 					  const struct plane_size *plane_size,
4709 					  union dc_tiling_info *tiling_info,
4710 					  struct dc_plane_dcc_param *dcc,
4711 					  struct dc_plane_address *address,
4712 					  const bool force_disable_dcc)
4713 {
4714 	const uint64_t modifier = afb->base.modifier;
4715 	int ret;
4716 
4717 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4718 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4719 
4720 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4721 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4722 
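		/*
		 * With DCC modifiers the metadata lives in plane 1 of the
		 * framebuffer, so offsets[1]/pitches[1] describe the DCC
		 * surface rather than a second pixel plane.
		 */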
4723 		dcc->enable = 1;
4724 		dcc->meta_pitch = afb->base.pitches[1];
4725 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4726 
4727 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4728 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4729 	}
4730 
4731 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4732 	if (ret)
4733 		return ret;
4734 
4735 	return 0;
4736 }
4737 
4738 static int
4739 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4740 			     const struct amdgpu_framebuffer *afb,
4741 			     const enum surface_pixel_format format,
4742 			     const enum dc_rotation_angle rotation,
4743 			     const uint64_t tiling_flags,
4744 			     union dc_tiling_info *tiling_info,
4745 			     struct plane_size *plane_size,
4746 			     struct dc_plane_dcc_param *dcc,
4747 			     struct dc_plane_address *address,
4748 			     bool tmz_surface,
4749 			     bool force_disable_dcc)
4750 {
4751 	const struct drm_framebuffer *fb = &afb->base;
4752 	int ret;
4753 
4754 	memset(tiling_info, 0, sizeof(*tiling_info));
4755 	memset(plane_size, 0, sizeof(*plane_size));
4756 	memset(dcc, 0, sizeof(*dcc));
4757 	memset(address, 0, sizeof(*address));
4758 
4759 	address->tmz_surface = tmz_surface;
4760 
4761 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4762 		uint64_t addr = afb->address + fb->offsets[0];
4763 
4764 		plane_size->surface_size.x = 0;
4765 		plane_size->surface_size.y = 0;
4766 		plane_size->surface_size.width = fb->width;
4767 		plane_size->surface_size.height = fb->height;
4768 		plane_size->surface_pitch =
4769 			fb->pitches[0] / fb->format->cpp[0];
4770 
4771 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4772 		address->grph.addr.low_part = lower_32_bits(addr);
4773 		address->grph.addr.high_part = upper_32_bits(addr);
4774 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4775 		uint64_t luma_addr = afb->address + fb->offsets[0];
4776 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4777 
4778 		plane_size->surface_size.x = 0;
4779 		plane_size->surface_size.y = 0;
4780 		plane_size->surface_size.width = fb->width;
4781 		plane_size->surface_size.height = fb->height;
4782 		plane_size->surface_pitch =
4783 			fb->pitches[0] / fb->format->cpp[0];
4784 
4785 		plane_size->chroma_size.x = 0;
4786 		plane_size->chroma_size.y = 0;
4787 		/* TODO: set these based on surface format */
4788 		plane_size->chroma_size.width = fb->width / 2;
4789 		plane_size->chroma_size.height = fb->height / 2;
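		/*
		 * For 4:2:0 formats like NV12 the chroma plane is subsampled
		 * by 2 in both dimensions, e.g. 1920x1080 luma pairs with
		 * 960x540 chroma.
		 */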
4790 
4791 		plane_size->chroma_pitch =
4792 			fb->pitches[1] / fb->format->cpp[1];
4793 
4794 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4795 		address->video_progressive.luma_addr.low_part =
4796 			lower_32_bits(luma_addr);
4797 		address->video_progressive.luma_addr.high_part =
4798 			upper_32_bits(luma_addr);
4799 		address->video_progressive.chroma_addr.low_part =
4800 			lower_32_bits(chroma_addr);
4801 		address->video_progressive.chroma_addr.high_part =
4802 			upper_32_bits(chroma_addr);
4803 	}
4804 
4805 	if (adev->family >= AMDGPU_FAMILY_AI) {
4806 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4807 								rotation, plane_size,
4808 								tiling_info, dcc,
4809 								address,
4810 								force_disable_dcc);
4811 		if (ret)
4812 			return ret;
4813 	} else {
4814 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4815 	}
4816 
4817 	return 0;
4818 }
4819 
4820 static void
4821 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4822 			       bool *per_pixel_alpha, bool *global_alpha,
4823 			       int *global_alpha_value)
4824 {
4825 	*per_pixel_alpha = false;
4826 	*global_alpha = false;
4827 	*global_alpha_value = 0xff;
4828 
4829 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4830 		return;
4831 
4832 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4833 		static const uint32_t alpha_formats[] = {
4834 			DRM_FORMAT_ARGB8888,
4835 			DRM_FORMAT_RGBA8888,
4836 			DRM_FORMAT_ABGR8888,
4837 		};
4838 		uint32_t format = plane_state->fb->format->format;
4839 		unsigned int i;
4840 
4841 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4842 			if (format == alpha_formats[i]) {
4843 				*per_pixel_alpha = true;
4844 				break;
4845 			}
4846 		}
4847 	}
4848 
4849 	if (plane_state->alpha < 0xffff) {
4850 		*global_alpha = true;
4851 		*global_alpha_value = plane_state->alpha >> 8;
4852 	}
4853 }
4854 
4855 static int
4856 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4857 			    const enum surface_pixel_format format,
4858 			    enum dc_color_space *color_space)
4859 {
4860 	bool full_range;
4861 
4862 	*color_space = COLOR_SPACE_SRGB;
4863 
4864 	/* DRM color properties only affect non-RGB formats. */
4865 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4866 		return 0;
4867 
4868 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4869 
4870 	switch (plane_state->color_encoding) {
4871 	case DRM_COLOR_YCBCR_BT601:
4872 		if (full_range)
4873 			*color_space = COLOR_SPACE_YCBCR601;
4874 		else
4875 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4876 		break;
4877 
4878 	case DRM_COLOR_YCBCR_BT709:
4879 		if (full_range)
4880 			*color_space = COLOR_SPACE_YCBCR709;
4881 		else
4882 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4883 		break;
4884 
4885 	case DRM_COLOR_YCBCR_BT2020:
4886 		if (full_range)
4887 			*color_space = COLOR_SPACE_2020_YCBCR;
4888 		else
4889 			return -EINVAL;
4890 		break;
4891 
4892 	default:
4893 		return -EINVAL;
4894 	}
4895 
4896 	return 0;
4897 }
4898 
4899 static int
4900 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4901 			    const struct drm_plane_state *plane_state,
4902 			    const uint64_t tiling_flags,
4903 			    struct dc_plane_info *plane_info,
4904 			    struct dc_plane_address *address,
4905 			    bool tmz_surface,
4906 			    bool force_disable_dcc)
4907 {
4908 	const struct drm_framebuffer *fb = plane_state->fb;
4909 	const struct amdgpu_framebuffer *afb =
4910 		to_amdgpu_framebuffer(plane_state->fb);
4911 	int ret;
4912 
4913 	memset(plane_info, 0, sizeof(*plane_info));
4914 
4915 	switch (fb->format->format) {
4916 	case DRM_FORMAT_C8:
4917 		plane_info->format =
4918 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4919 		break;
4920 	case DRM_FORMAT_RGB565:
4921 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4922 		break;
4923 	case DRM_FORMAT_XRGB8888:
4924 	case DRM_FORMAT_ARGB8888:
4925 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4926 		break;
4927 	case DRM_FORMAT_XRGB2101010:
4928 	case DRM_FORMAT_ARGB2101010:
4929 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4930 		break;
4931 	case DRM_FORMAT_XBGR2101010:
4932 	case DRM_FORMAT_ABGR2101010:
4933 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4934 		break;
4935 	case DRM_FORMAT_XBGR8888:
4936 	case DRM_FORMAT_ABGR8888:
4937 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4938 		break;
4939 	case DRM_FORMAT_NV21:
4940 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4941 		break;
4942 	case DRM_FORMAT_NV12:
4943 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4944 		break;
4945 	case DRM_FORMAT_P010:
4946 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4947 		break;
4948 	case DRM_FORMAT_XRGB16161616F:
4949 	case DRM_FORMAT_ARGB16161616F:
4950 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4951 		break;
4952 	case DRM_FORMAT_XBGR16161616F:
4953 	case DRM_FORMAT_ABGR16161616F:
4954 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4955 		break;
4956 	default:
4957 		DRM_ERROR(
4958 			"Unsupported screen format %p4cc\n",
4959 			&fb->format->format);
4960 		return -EINVAL;
4961 	}
4962 
4963 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4964 	case DRM_MODE_ROTATE_0:
4965 		plane_info->rotation = ROTATION_ANGLE_0;
4966 		break;
4967 	case DRM_MODE_ROTATE_90:
4968 		plane_info->rotation = ROTATION_ANGLE_90;
4969 		break;
4970 	case DRM_MODE_ROTATE_180:
4971 		plane_info->rotation = ROTATION_ANGLE_180;
4972 		break;
4973 	case DRM_MODE_ROTATE_270:
4974 		plane_info->rotation = ROTATION_ANGLE_270;
4975 		break;
4976 	default:
4977 		plane_info->rotation = ROTATION_ANGLE_0;
4978 		break;
4979 	}
4980 
4981 	plane_info->visible = true;
4982 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4983 
4984 	plane_info->layer_index = 0;
4985 
4986 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4987 					  &plane_info->color_space);
4988 	if (ret)
4989 		return ret;
4990 
4991 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4992 					   plane_info->rotation, tiling_flags,
4993 					   &plane_info->tiling_info,
4994 					   &plane_info->plane_size,
4995 					   &plane_info->dcc, address, tmz_surface,
4996 					   force_disable_dcc);
4997 	if (ret)
4998 		return ret;
4999 
5000 	fill_blending_from_plane_state(
5001 		plane_state, &plane_info->per_pixel_alpha,
5002 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5003 
5004 	return 0;
5005 }
5006 
5007 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5008 				    struct dc_plane_state *dc_plane_state,
5009 				    struct drm_plane_state *plane_state,
5010 				    struct drm_crtc_state *crtc_state)
5011 {
5012 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5013 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5014 	struct dc_scaling_info scaling_info;
5015 	struct dc_plane_info plane_info;
5016 	int ret;
5017 	bool force_disable_dcc = false;
5018 
5019 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5020 	if (ret)
5021 		return ret;
5022 
5023 	dc_plane_state->src_rect = scaling_info.src_rect;
5024 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5025 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5026 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5027 
5028 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5029 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5030 					  afb->tiling_flags,
5031 					  &plane_info,
5032 					  &dc_plane_state->address,
5033 					  afb->tmz_surface,
5034 					  force_disable_dcc);
5035 	if (ret)
5036 		return ret;
5037 
5038 	dc_plane_state->format = plane_info.format;
5039 	dc_plane_state->color_space = plane_info.color_space;
5041 	dc_plane_state->plane_size = plane_info.plane_size;
5042 	dc_plane_state->rotation = plane_info.rotation;
5043 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5044 	dc_plane_state->stereo_format = plane_info.stereo_format;
5045 	dc_plane_state->tiling_info = plane_info.tiling_info;
5046 	dc_plane_state->visible = plane_info.visible;
5047 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5048 	dc_plane_state->global_alpha = plane_info.global_alpha;
5049 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5050 	dc_plane_state->dcc = plane_info.dcc;
5051 	dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
5052 	dc_plane_state->flip_int_enabled = true;
5053 
5054 	/*
5055 	 * Always set input transfer function, since plane state is refreshed
5056 	 * every time.
5057 	 */
5058 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5059 	if (ret)
5060 		return ret;
5061 
5062 	return 0;
5063 }
5064 
5065 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5066 					   const struct dm_connector_state *dm_state,
5067 					   struct dc_stream_state *stream)
5068 {
5069 	enum amdgpu_rmx_type rmx_type;
5070 
5071 	struct rect src = { 0 }; /* viewport in composition space */
5072 	struct rect dst = { 0 }; /* stream addressable area */
5073 
5074 	/* No mode; nothing to be done. */
5075 	if (!mode)
5076 		return;
5077 
5078 	/* Full screen scaling by default */
5079 	src.width = mode->hdisplay;
5080 	src.height = mode->vdisplay;
5081 	dst.width = stream->timing.h_addressable;
5082 	dst.height = stream->timing.v_addressable;
5083 
5084 	if (dm_state) {
5085 		rmx_type = dm_state->scaling;
5086 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5087 			if (src.width * dst.height <
5088 					src.height * dst.width) {
5089 				/* height needs less upscaling/more downscaling */
5090 				dst.width = src.width *
5091 						dst.height / src.height;
5092 			} else {
5093 				/* width needs less upscaling/more downscaling */
5094 				dst.height = src.height *
5095 						dst.width / src.width;
5096 			}
5097 		} else if (rmx_type == RMX_CENTER) {
5098 			dst = src;
5099 		}
5100 
5101 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5102 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5103 
5104 		if (dm_state->underscan_enable) {
5105 			dst.x += dm_state->underscan_hborder / 2;
5106 			dst.y += dm_state->underscan_vborder / 2;
5107 			dst.width -= dm_state->underscan_hborder;
5108 			dst.height -= dm_state->underscan_vborder;
5109 		}
5110 	}
5111 
5112 	stream->src = src;
5113 	stream->dst = dst;
5114 
5115 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5116 		      dst.x, dst.y, dst.width, dst.height);
5117 
5118 }
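/*
 * Worked example of the RMX_ASPECT math above (illustrative numbers,
 * not from the driver): a 1280x1024 source on a 1920x1080 stream gives
 * src.width * dst.height = 1382400 < src.height * dst.width = 1966080,
 * so the height branch is taken and dst.width = 1280 * 1080 / 1024 =
 * 1350. The result is then centered: dst.x = (1920 - 1350) / 2 = 285,
 * i.e. pillarboxing.
 */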
5119 
5120 static enum dc_color_depth
5121 convert_color_depth_from_display_info(const struct drm_connector *connector,
5122 				      bool is_y420, int requested_bpc)
5123 {
5124 	uint8_t bpc;
5125 
5126 	if (is_y420) {
5127 		bpc = 8;
5128 
5129 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5130 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5131 			bpc = 16;
5132 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5133 			bpc = 12;
5134 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5135 			bpc = 10;
5136 	} else {
5137 		bpc = (uint8_t)connector->display_info.bpc;
5138 		/* Assume 8 bpc by default if no bpc is specified. */
5139 		bpc = bpc ? bpc : 8;
5140 	}
5141 
5142 	if (requested_bpc > 0) {
5143 		/*
5144 		 * Cap display bpc based on the user requested value.
5145 		 *
5146 		 * The value for state->max_bpc may not be correctly updated
5147 		 * depending on when the connector gets added to the state
5148 		 * or if this was called outside of atomic check, so it
5149 		 * can't be used directly.
5150 		 */
5151 		bpc = min_t(u8, bpc, requested_bpc);
5152 
5153 		/* Round down to the nearest even number. */
5154 		bpc = bpc - (bpc & 1);
5155 	}
5156 
5157 	switch (bpc) {
5158 	case 0:
5159 		/*
5160 		 * Temporary workaround: DRM doesn't parse color depth for
5161 		 * EDID revisions before 1.4.
5162 		 * TODO: Fix EDID parsing
5163 		 */
5164 		return COLOR_DEPTH_888;
5165 	case 6:
5166 		return COLOR_DEPTH_666;
5167 	case 8:
5168 		return COLOR_DEPTH_888;
5169 	case 10:
5170 		return COLOR_DEPTH_101010;
5171 	case 12:
5172 		return COLOR_DEPTH_121212;
5173 	case 14:
5174 		return COLOR_DEPTH_141414;
5175 	case 16:
5176 		return COLOR_DEPTH_161616;
5177 	default:
5178 		return COLOR_DEPTH_UNDEFINED;
5179 	}
5180 }
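/*
 * Example of the capping above (assumed values): a panel advertising
 * 12 bpc with a userspace max_requested_bpc of 10 yields min(12, 10) =
 * 10 -> COLOR_DEPTH_101010; an odd cap such as 11 is rounded down to
 * 10 by the "bpc & 1" step.
 */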
5181 
5182 static enum dc_aspect_ratio
5183 get_aspect_ratio(const struct drm_display_mode *mode_in)
5184 {
5185 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5186 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5187 }
5188 
5189 static enum dc_color_space
5190 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5191 {
5192 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5193 
5194 	switch (dc_crtc_timing->pixel_encoding)	{
5195 	case PIXEL_ENCODING_YCBCR422:
5196 	case PIXEL_ENCODING_YCBCR444:
5197 	case PIXEL_ENCODING_YCBCR420:
5198 	{
5199 		/*
5200 		 * 27.03 MHz is the separation point between HDTV and SDTV
5201 		 * according to the HDMI spec; use YCbCr709 above it and
5202 		 * YCbCr601 below it.
5203 		 */
5204 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5205 			if (dc_crtc_timing->flags.Y_ONLY)
5206 				color_space =
5207 					COLOR_SPACE_YCBCR709_LIMITED;
5208 			else
5209 				color_space = COLOR_SPACE_YCBCR709;
5210 		} else {
5211 			if (dc_crtc_timing->flags.Y_ONLY)
5212 				color_space =
5213 					COLOR_SPACE_YCBCR601_LIMITED;
5214 			else
5215 				color_space = COLOR_SPACE_YCBCR601;
5216 		}
5217 
5218 	}
5219 	break;
5220 	case PIXEL_ENCODING_RGB:
5221 		color_space = COLOR_SPACE_SRGB;
5222 		break;
5223 
5224 	default:
5225 		WARN_ON(1);
5226 		break;
5227 	}
5228 
5229 	return color_space;
5230 }
5231 
5232 static bool adjust_colour_depth_from_display_info(
5233 	struct dc_crtc_timing *timing_out,
5234 	const struct drm_display_info *info)
5235 {
5236 	enum dc_color_depth depth = timing_out->display_color_depth;
5237 	int normalized_clk;
5238 	do {
5239 		normalized_clk = timing_out->pix_clk_100hz / 10;
5240 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5241 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5242 			normalized_clk /= 2;
5243 		/* Adjust the pixel clock per the HDMI spec, based on the colour depth */
5244 		switch (depth) {
5245 		case COLOR_DEPTH_888:
5246 			break;
5247 		case COLOR_DEPTH_101010:
5248 			normalized_clk = (normalized_clk * 30) / 24;
5249 			break;
5250 		case COLOR_DEPTH_121212:
5251 			normalized_clk = (normalized_clk * 36) / 24;
5252 			break;
5253 		case COLOR_DEPTH_161616:
5254 			normalized_clk = (normalized_clk * 48) / 24;
5255 			break;
5256 		default:
5257 			/* The above depths are the only ones valid for HDMI. */
5258 			return false;
5259 		}
5260 		if (normalized_clk <= info->max_tmds_clock) {
5261 			timing_out->display_color_depth = depth;
5262 			return true;
5263 		}
5264 	} while (--depth > COLOR_DEPTH_666);
5265 	return false;
5266 }
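/*
 * Worked example, assuming a 4k60 HDMI timing (pix_clk_100hz = 5940000,
 * i.e. 594 MHz) and a sink reporting max_tmds_clock = 600000 kHz: at
 * COLOR_DEPTH_101010 the normalized clock is 594000 * 30 / 24 = 742500
 * kHz, which exceeds the limit, so the loop steps down and settles on
 * COLOR_DEPTH_888 at 594000 kHz.
 */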
5267 
5268 static void fill_stream_properties_from_drm_display_mode(
5269 	struct dc_stream_state *stream,
5270 	const struct drm_display_mode *mode_in,
5271 	const struct drm_connector *connector,
5272 	const struct drm_connector_state *connector_state,
5273 	const struct dc_stream_state *old_stream,
5274 	int requested_bpc)
5275 {
5276 	struct dc_crtc_timing *timing_out = &stream->timing;
5277 	const struct drm_display_info *info = &connector->display_info;
5278 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5279 	struct hdmi_vendor_infoframe hv_frame;
5280 	struct hdmi_avi_infoframe avi_frame;
5281 
5282 	memset(&hv_frame, 0, sizeof(hv_frame));
5283 	memset(&avi_frame, 0, sizeof(avi_frame));
5284 
5285 	timing_out->h_border_left = 0;
5286 	timing_out->h_border_right = 0;
5287 	timing_out->v_border_top = 0;
5288 	timing_out->v_border_bottom = 0;
5289 	/* TODO: un-hardcode */
5290 	if (drm_mode_is_420_only(info, mode_in)
5291 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5292 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5293 	else if (drm_mode_is_420_also(info, mode_in)
5294 			&& aconnector->force_yuv420_output)
5295 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5296 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5297 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5298 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5299 	else
5300 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5301 
5302 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5303 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5304 		connector,
5305 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5306 		requested_bpc);
5307 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5308 	timing_out->hdmi_vic = 0;
5309 
5310 	if (old_stream) {
5311 		timing_out->vic = old_stream->timing.vic;
5312 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5313 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5314 	} else {
5315 		timing_out->vic = drm_match_cea_mode(mode_in);
5316 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5317 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5318 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5319 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5320 	}
5321 
5322 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5323 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5324 		timing_out->vic = avi_frame.video_code;
5325 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5326 		timing_out->hdmi_vic = hv_frame.vic;
5327 	}
5328 
5329 	if (is_freesync_video_mode(mode_in, aconnector)) {
5330 		timing_out->h_addressable = mode_in->hdisplay;
5331 		timing_out->h_total = mode_in->htotal;
5332 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5333 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5334 		timing_out->v_total = mode_in->vtotal;
5335 		timing_out->v_addressable = mode_in->vdisplay;
5336 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5337 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5338 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5339 	} else {
5340 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5341 		timing_out->h_total = mode_in->crtc_htotal;
5342 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5343 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5344 		timing_out->v_total = mode_in->crtc_vtotal;
5345 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5346 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5347 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5348 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5349 	}
5350 
5351 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5352 
5353 	stream->output_color_space = get_output_color_space(timing_out);
5354 
5355 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5356 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5357 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5358 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5359 		    drm_mode_is_420_also(info, mode_in) &&
5360 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5361 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5362 			adjust_colour_depth_from_display_info(timing_out, info);
5363 		}
5364 	}
5365 }
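/*
 * Note on units: DRM mode clocks are in kHz while DC timings use
 * 100 Hz units, hence the "* 10" above -- e.g. a 148500 kHz mode
 * becomes pix_clk_100hz = 1485000.
 */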
5366 
5367 static void fill_audio_info(struct audio_info *audio_info,
5368 			    const struct drm_connector *drm_connector,
5369 			    const struct dc_sink *dc_sink)
5370 {
5371 	int i = 0;
5372 	int cea_revision = 0;
5373 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5374 
5375 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5376 	audio_info->product_id = edid_caps->product_id;
5377 
5378 	cea_revision = drm_connector->display_info.cea_rev;
5379 
5380 	strscpy(audio_info->display_name,
5381 		edid_caps->display_name,
5382 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5383 
5384 	if (cea_revision >= 3) {
5385 		audio_info->mode_count = edid_caps->audio_mode_count;
5386 
5387 		for (i = 0; i < audio_info->mode_count; ++i) {
5388 			audio_info->modes[i].format_code =
5389 					(enum audio_format_code)
5390 					(edid_caps->audio_modes[i].format_code);
5391 			audio_info->modes[i].channel_count =
5392 					edid_caps->audio_modes[i].channel_count;
5393 			audio_info->modes[i].sample_rates.all =
5394 					edid_caps->audio_modes[i].sample_rate;
5395 			audio_info->modes[i].sample_size =
5396 					edid_caps->audio_modes[i].sample_size;
5397 		}
5398 	}
5399 
5400 	audio_info->flags.all = edid_caps->speaker_flags;
5401 
5402 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5403 	if (drm_connector->latency_present[0]) {
5404 		audio_info->video_latency = drm_connector->video_latency[0];
5405 		audio_info->audio_latency = drm_connector->audio_latency[0];
5406 	}
5407 
5408 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5409 
5410 }
5411 
5412 static void
5413 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5414 				      struct drm_display_mode *dst_mode)
5415 {
5416 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5417 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5418 	dst_mode->crtc_clock = src_mode->crtc_clock;
5419 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5420 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5421 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5422 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5423 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5424 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5425 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5426 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5427 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5428 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5429 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5430 }
5431 
5432 static void
5433 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5434 					const struct drm_display_mode *native_mode,
5435 					bool scale_enabled)
5436 {
5437 	if (scale_enabled) {
5438 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5439 	} else if (native_mode->clock == drm_mode->clock &&
5440 			native_mode->htotal == drm_mode->htotal &&
5441 			native_mode->vtotal == drm_mode->vtotal) {
5442 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5443 	} else {
5444 		/* no scaling nor amdgpu inserted, no need to patch */
5445 	}
5446 }
5447 
5448 static struct dc_sink *
5449 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5450 {
5451 	struct dc_sink_init_data sink_init_data = { 0 };
5452 	struct dc_sink *sink = NULL;
5453 	sink_init_data.link = aconnector->dc_link;
5454 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5455 
5456 	sink = dc_sink_create(&sink_init_data);
5457 	if (!sink) {
5458 		DRM_ERROR("Failed to create sink!\n");
5459 		return NULL;
5460 	}
5461 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5462 
5463 	return sink;
5464 }
5465 
5466 static void set_multisync_trigger_params(
5467 		struct dc_stream_state *stream)
5468 {
5469 	struct dc_stream_state *master = NULL;
5470 
5471 	if (stream->triggered_crtc_reset.enabled) {
5472 		master = stream->triggered_crtc_reset.event_source;
5473 		stream->triggered_crtc_reset.event =
5474 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5475 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5476 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5477 	}
5478 }
5479 
5480 static void set_master_stream(struct dc_stream_state *stream_set[],
5481 			      int stream_count)
5482 {
5483 	int j, highest_rfr = 0, master_stream = 0;
5484 
5485 	for (j = 0;  j < stream_count; j++) {
5486 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5487 			int refresh_rate = 0;
5488 
5489 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5490 				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5491 			if (refresh_rate > highest_rfr) {
5492 				highest_rfr = refresh_rate;
5493 				master_stream = j;
5494 			}
5495 		}
5496 	}
5497 	for (j = 0;  j < stream_count; j++) {
5498 		if (stream_set[j])
5499 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5500 	}
5501 }
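/*
 * Refresh-rate math above, with an illustrative 1080p60 timing:
 * pix_clk_100hz = 1485000, h_total = 2200, v_total = 1125, so
 * (1485000 * 100) / (2200 * 1125) = 60 Hz; the stream with the
 * highest such rate becomes the multisync master.
 */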
5502 
5503 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5504 {
5505 	int i = 0;
5506 	struct dc_stream_state *stream;
5507 
5508 	if (context->stream_count < 2)
5509 		return;
5510 	for (i = 0; i < context->stream_count; i++) {
5511 		if (!context->streams[i])
5512 			continue;
5513 		/*
5514 		 * TODO: add a function to read AMD VSDB bits and set
5515 		 * crtc_sync_master.multi_sync_enabled flag
5516 		 * For now it's set to false
5517 		 */
5518 	}
5519 
5520 	set_master_stream(context->streams, context->stream_count);
5521 
5522 	for (i = 0; i < context->stream_count; i++) {
5523 		stream = context->streams[i];
5524 
5525 		if (!stream)
5526 			continue;
5527 
5528 		set_multisync_trigger_params(stream);
5529 	}
5530 }
5531 
5532 static struct drm_display_mode *
5533 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5534 			  bool use_probed_modes)
5535 {
5536 	struct drm_display_mode *m, *m_pref = NULL;
5537 	u16 current_refresh, highest_refresh;
5538 	struct list_head *list_head = use_probed_modes ?
5539 						    &aconnector->base.probed_modes :
5540 						    &aconnector->base.modes;
5541 
5542 	if (aconnector->freesync_vid_base.clock != 0)
5543 		return &aconnector->freesync_vid_base;
5544 
5545 	/* Find the preferred mode */
5546 	list_for_each_entry(m, list_head, head) {
5547 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5548 			m_pref = m;
5549 			break;
5550 		}
5551 	}
5552 
5553 	if (!m_pref) {
5554 		/* Probably an EDID with no preferred mode. Fall back to the first entry */
5555 		m_pref = list_first_entry_or_null(
5556 			&aconnector->base.modes, struct drm_display_mode, head);
5557 		if (!m_pref) {
5558 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5559 			return NULL;
5560 		}
5561 	}
5562 
5563 	highest_refresh = drm_mode_vrefresh(m_pref);
5564 
5565 	/*
5566 	 * Find the mode with the highest refresh rate at the same resolution.
5567 	 * For some monitors the preferred mode is not the one with the highest
5568 	 * supported refresh rate.
5569 	 */
5570 	list_for_each_entry(m, list_head, head) {
5571 		current_refresh  = drm_mode_vrefresh(m);
5572 
5573 		if (m->hdisplay == m_pref->hdisplay &&
5574 		    m->vdisplay == m_pref->vdisplay &&
5575 		    highest_refresh < current_refresh) {
5576 			highest_refresh = current_refresh;
5577 			m_pref = m;
5578 		}
5579 	}
5580 
5581 	aconnector->freesync_vid_base = *m_pref;
5582 	return m_pref;
5583 }
5584 
5585 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5586 				   struct amdgpu_dm_connector *aconnector)
5587 {
5588 	struct drm_display_mode *high_mode;
5589 	int timing_diff;
5590 
5591 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5592 	if (!high_mode || !mode)
5593 		return false;
5594 
5595 	timing_diff = high_mode->vtotal - mode->vtotal;
5596 
5597 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5598 	    high_mode->hdisplay != mode->hdisplay ||
5599 	    high_mode->vdisplay != mode->vdisplay ||
5600 	    high_mode->hsync_start != mode->hsync_start ||
5601 	    high_mode->hsync_end != mode->hsync_end ||
5602 	    high_mode->htotal != mode->htotal ||
5603 	    high_mode->hskew != mode->hskew ||
5604 	    high_mode->vscan != mode->vscan ||
5605 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5606 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5607 		return false;
5608 	else
5609 		return true;
5610 }
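/*
 * In other words, a mode qualifies as a freesync video mode only when
 * it differs from the highest-refresh base mode purely by vertical
 * blanking: same clock and horizontal timing, with vsync_start and
 * vsync_end shifted by exactly the vtotal delta.
 */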
5611 
5612 static struct dc_stream_state *
5613 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5614 		       const struct drm_display_mode *drm_mode,
5615 		       const struct dm_connector_state *dm_state,
5616 		       const struct dc_stream_state *old_stream,
5617 		       int requested_bpc)
5618 {
5619 	struct drm_display_mode *preferred_mode = NULL;
5620 	struct drm_connector *drm_connector;
5621 	const struct drm_connector_state *con_state =
5622 		dm_state ? &dm_state->base : NULL;
5623 	struct dc_stream_state *stream = NULL;
5624 	struct drm_display_mode mode = *drm_mode;
5625 	struct drm_display_mode saved_mode;
5626 	struct drm_display_mode *freesync_mode = NULL;
5627 	bool native_mode_found = false;
5628 	bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5629 	int mode_refresh;
5630 	int preferred_refresh = 0;
5631 #if defined(CONFIG_DRM_AMD_DC_DCN)
5632 	struct dsc_dec_dpcd_caps dsc_caps;
5633 	uint32_t link_bandwidth_kbps;
5634 #endif
5635 	struct dc_sink *sink = NULL;
5636 
5637 	memset(&saved_mode, 0, sizeof(saved_mode));
5638 
5639 	if (aconnector == NULL) {
5640 		DRM_ERROR("aconnector is NULL!\n");
5641 		return stream;
5642 	}
5643 
5644 	drm_connector = &aconnector->base;
5645 
5646 	if (!aconnector->dc_sink) {
5647 		sink = create_fake_sink(aconnector);
5648 		if (!sink)
5649 			return stream;
5650 	} else {
5651 		sink = aconnector->dc_sink;
5652 		dc_sink_retain(sink);
5653 	}
5654 
5655 	stream = dc_create_stream_for_sink(sink);
5656 
5657 	if (stream == NULL) {
5658 		DRM_ERROR("Failed to create stream for sink!\n");
5659 		goto finish;
5660 	}
5661 
5662 	stream->dm_stream_context = aconnector;
5663 
5664 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5665 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5666 
5667 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5668 		/* Search for preferred mode */
5669 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5670 			native_mode_found = true;
5671 			break;
5672 		}
5673 	}
5674 	if (!native_mode_found)
5675 		preferred_mode = list_first_entry_or_null(
5676 				&aconnector->base.modes,
5677 				struct drm_display_mode,
5678 				head);
5679 
5680 	mode_refresh = drm_mode_vrefresh(&mode);
5681 
5682 	if (preferred_mode == NULL) {
5683 		/*
5684 		 * This may not be an error: the use case is when we have no
5685 		 * usermode calls to reset and set mode upon hotplug. In this
5686 		 * case, we call set mode ourselves to restore the previous mode,
5687 		 * and the mode list may not be populated in time.
5688 		 */
5689 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5690 	} else {
5691 		recalculate_timing |= amdgpu_freesync_vid_mode &&
5692 				 is_freesync_video_mode(&mode, aconnector);
5693 		if (recalculate_timing) {
5694 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5695 			saved_mode = mode;
5696 			mode = *freesync_mode;
5697 		} else {
5698 			decide_crtc_timing_for_drm_display_mode(
5699 				&mode, preferred_mode,
5700 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5701 		}
5702 
5703 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5704 	}
5705 
5706 	if (recalculate_timing)
5707 		drm_mode_set_crtcinfo(&saved_mode, 0);
5708 	else if (!dm_state)
5709 		drm_mode_set_crtcinfo(&mode, 0);
5710 
5711 	/*
5712 	 * If scaling is enabled and the refresh rate didn't change,
5713 	 * we copy the VIC and polarities from the old timings.
5714 	 */
5715 	if (!recalculate_timing || mode_refresh != preferred_refresh)
5716 		fill_stream_properties_from_drm_display_mode(
5717 			stream, &mode, &aconnector->base, con_state, NULL,
5718 			requested_bpc);
5719 	else
5720 		fill_stream_properties_from_drm_display_mode(
5721 			stream, &mode, &aconnector->base, con_state, old_stream,
5722 			requested_bpc);
5723 
5724 	stream->timing.flags.DSC = 0;
5725 
5726 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5727 #if defined(CONFIG_DRM_AMD_DC_DCN)
5728 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5729 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5730 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5731 				      &dsc_caps);
5732 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5733 							     dc_link_get_link_cap(aconnector->dc_link));
5734 
5735 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5736 			/* Set DSC policy according to dsc_clock_en */
5737 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5738 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5739 
5740 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5741 						  &dsc_caps,
5742 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5743 						  0,
5744 						  link_bandwidth_kbps,
5745 						  &stream->timing,
5746 						  &stream->timing.dsc_cfg))
5747 				stream->timing.flags.DSC = 1;
5748 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5749 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5750 				stream->timing.flags.DSC = 1;
5751 
5752 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5753 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5754 
5755 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5756 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5757 
5758 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5759 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5760 		}
5761 #endif
5762 	}
5763 
5764 	update_stream_scaling_settings(&mode, dm_state, stream);
5765 
5766 	fill_audio_info(
5767 		&stream->audio_info,
5768 		drm_connector,
5769 		sink);
5770 
5771 	update_stream_signal(stream, sink);
5772 
5773 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5774 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5775 
5776 	if (stream->link->psr_settings.psr_feature_enabled) {
5777 		/*
5778 		 * Decide whether the stream supports VSC SDP colorimetry
5779 		 * before building the VSC info packet.
5780 		 */
5781 		stream->use_vsc_sdp_for_colorimetry = false;
5782 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5783 			stream->use_vsc_sdp_for_colorimetry =
5784 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5785 		} else {
5786 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5787 				stream->use_vsc_sdp_for_colorimetry = true;
5788 		}
5789 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5790 	}
5791 finish:
5792 	dc_sink_release(sink);
5793 
5794 	return stream;
5795 }
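/*
 * Summary of the DSC decision above: compression is enabled when
 * dc_dsc_compute_config() finds a configuration that fits the link
 * bandwidth, the debugfs force-enable knob can override that result,
 * and the slice/bpp overrides only apply once DSC is actually on.
 */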
5796 
5797 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5798 {
5799 	drm_crtc_cleanup(crtc);
5800 	kfree(crtc);
5801 }
5802 
5803 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5804 				  struct drm_crtc_state *state)
5805 {
5806 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5807 
5808 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
5809 	if (cur->stream)
5810 		dc_stream_release(cur->stream);
5811 
5813 	__drm_atomic_helper_crtc_destroy_state(state);
5814 
5816 	kfree(state);
5817 }
5818 
5819 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5820 {
5821 	struct dm_crtc_state *state;
5822 
5823 	if (crtc->state)
5824 		dm_crtc_destroy_state(crtc, crtc->state);
5825 
5826 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5827 	if (WARN_ON(!state))
5828 		return;
5829 
5830 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5831 }
5832 
5833 static struct drm_crtc_state *
5834 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5835 {
5836 	struct dm_crtc_state *state, *cur;
5837 
5838 	if (WARN_ON(!crtc->state))
5839 		return NULL;
5840 
5841 	cur = to_dm_crtc_state(crtc->state);
5842 
5843 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5844 	if (!state)
5845 		return NULL;
5846 
5847 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5848 
5849 	if (cur->stream) {
5850 		state->stream = cur->stream;
5851 		dc_stream_retain(state->stream);
5852 	}
5853 
5854 	state->active_planes = cur->active_planes;
5855 	state->vrr_infopacket = cur->vrr_infopacket;
5856 	state->abm_level = cur->abm_level;
5857 	state->vrr_supported = cur->vrr_supported;
5858 	state->freesync_config = cur->freesync_config;
5859 	state->cm_has_degamma = cur->cm_has_degamma;
5860 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5861 	/* TODO: Duplicate dc_stream once the stream object is flattened */
5862 
5863 	return &state->base;
5864 }
5865 
5866 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5867 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5868 {
5869 	crtc_debugfs_init(crtc);
5870 
5871 	return 0;
5872 }
5873 #endif
5874 
5875 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5876 {
5877 	enum dc_irq_source irq_source;
5878 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5879 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5880 	int rc;
5881 
5882 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5883 
5884 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5885 
5886 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5887 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
5888 	return rc;
5889 }
5890 
5891 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5892 {
5893 	enum dc_irq_source irq_source;
5894 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5895 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5896 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5897 #if defined(CONFIG_DRM_AMD_DC_DCN)
5898 	struct amdgpu_display_manager *dm = &adev->dm;
5899 	unsigned long flags;
5900 #endif
5901 	int rc = 0;
5902 
5903 	if (enable) {
5904 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5905 		if (amdgpu_dm_vrr_active(acrtc_state))
5906 			rc = dm_set_vupdate_irq(crtc, true);
5907 	} else {
5908 		/* vblank irq off -> vupdate irq off */
5909 		rc = dm_set_vupdate_irq(crtc, false);
5910 	}
5911 
5912 	if (rc)
5913 		return rc;
5914 
5915 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5916 
5917 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5918 		return -EBUSY;
5919 
5920 	if (amdgpu_in_reset(adev))
5921 		return 0;
5922 
5923 #if defined(CONFIG_DRM_AMD_DC_DCN)
5924 	spin_lock_irqsave(&dm->vblank_lock, flags);
5925 	dm->vblank_workqueue->dm = dm;
5926 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5927 	dm->vblank_workqueue->enable = enable;
5928 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
5929 	schedule_work(&dm->vblank_workqueue->mall_work);
5930 #endif
5931 
5932 	return 0;
5933 }
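/*
 * As the branches above encode, the VUPDATE interrupt is only needed
 * while VRR is active, so it is enabled together with vblank only in
 * that case and is unconditionally disabled along with vblank.
 */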
5934 
5935 static int dm_enable_vblank(struct drm_crtc *crtc)
5936 {
5937 	return dm_set_vblank(crtc, true);
5938 }
5939 
5940 static void dm_disable_vblank(struct drm_crtc *crtc)
5941 {
5942 	dm_set_vblank(crtc, false);
5943 }
5944 
5945 /* Only the options currently available for the driver are implemented */
5946 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5947 	.reset = dm_crtc_reset_state,
5948 	.destroy = amdgpu_dm_crtc_destroy,
5949 	.set_config = drm_atomic_helper_set_config,
5950 	.page_flip = drm_atomic_helper_page_flip,
5951 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5952 	.atomic_destroy_state = dm_crtc_destroy_state,
5953 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5954 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5955 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5956 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5957 	.enable_vblank = dm_enable_vblank,
5958 	.disable_vblank = dm_disable_vblank,
5959 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5960 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5961 	.late_register = amdgpu_dm_crtc_late_register,
5962 #endif
5963 };
5964 
5965 static enum drm_connector_status
5966 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5967 {
5968 	bool connected;
5969 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5970 
5971 	/*
5972 	 * Notes:
5973 	 * 1. This interface is NOT called in context of HPD irq.
5974 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5975 	 * makes it a bad place for *any* MST-related activity.
5976 	 */
5977 
5978 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5979 	    !aconnector->fake_enable)
5980 		connected = (aconnector->dc_sink != NULL);
5981 	else
5982 		connected = (aconnector->base.force == DRM_FORCE_ON);
5983 
5984 	update_subconnector_property(aconnector);
5985 
5986 	return (connected ? connector_status_connected :
5987 			connector_status_disconnected);
5988 }
5989 
5990 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5991 					    struct drm_connector_state *connector_state,
5992 					    struct drm_property *property,
5993 					    uint64_t val)
5994 {
5995 	struct drm_device *dev = connector->dev;
5996 	struct amdgpu_device *adev = drm_to_adev(dev);
5997 	struct dm_connector_state *dm_old_state =
5998 		to_dm_connector_state(connector->state);
5999 	struct dm_connector_state *dm_new_state =
6000 		to_dm_connector_state(connector_state);
6001 
6002 	int ret = -EINVAL;
6003 
6004 	if (property == dev->mode_config.scaling_mode_property) {
6005 		enum amdgpu_rmx_type rmx_type;
6006 
6007 		switch (val) {
6008 		case DRM_MODE_SCALE_CENTER:
6009 			rmx_type = RMX_CENTER;
6010 			break;
6011 		case DRM_MODE_SCALE_ASPECT:
6012 			rmx_type = RMX_ASPECT;
6013 			break;
6014 		case DRM_MODE_SCALE_FULLSCREEN:
6015 			rmx_type = RMX_FULL;
6016 			break;
6017 		case DRM_MODE_SCALE_NONE:
6018 		default:
6019 			rmx_type = RMX_OFF;
6020 			break;
6021 		}
6022 
6023 		if (dm_old_state->scaling == rmx_type)
6024 			return 0;
6025 
6026 		dm_new_state->scaling = rmx_type;
6027 		ret = 0;
6028 	} else if (property == adev->mode_info.underscan_hborder_property) {
6029 		dm_new_state->underscan_hborder = val;
6030 		ret = 0;
6031 	} else if (property == adev->mode_info.underscan_vborder_property) {
6032 		dm_new_state->underscan_vborder = val;
6033 		ret = 0;
6034 	} else if (property == adev->mode_info.underscan_property) {
6035 		dm_new_state->underscan_enable = val;
6036 		ret = 0;
6037 	} else if (property == adev->mode_info.abm_level_property) {
6038 		dm_new_state->abm_level = val;
6039 		ret = 0;
6040 	}
6041 
6042 	return ret;
6043 }
6044 
6045 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6046 					    const struct drm_connector_state *state,
6047 					    struct drm_property *property,
6048 					    uint64_t *val)
6049 {
6050 	struct drm_device *dev = connector->dev;
6051 	struct amdgpu_device *adev = drm_to_adev(dev);
6052 	struct dm_connector_state *dm_state =
6053 		to_dm_connector_state(state);
6054 	int ret = -EINVAL;
6055 
6056 	if (property == dev->mode_config.scaling_mode_property) {
6057 		switch (dm_state->scaling) {
6058 		case RMX_CENTER:
6059 			*val = DRM_MODE_SCALE_CENTER;
6060 			break;
6061 		case RMX_ASPECT:
6062 			*val = DRM_MODE_SCALE_ASPECT;
6063 			break;
6064 		case RMX_FULL:
6065 			*val = DRM_MODE_SCALE_FULLSCREEN;
6066 			break;
6067 		case RMX_OFF:
6068 		default:
6069 			*val = DRM_MODE_SCALE_NONE;
6070 			break;
6071 		}
6072 		ret = 0;
6073 	} else if (property == adev->mode_info.underscan_hborder_property) {
6074 		*val = dm_state->underscan_hborder;
6075 		ret = 0;
6076 	} else if (property == adev->mode_info.underscan_vborder_property) {
6077 		*val = dm_state->underscan_vborder;
6078 		ret = 0;
6079 	} else if (property == adev->mode_info.underscan_property) {
6080 		*val = dm_state->underscan_enable;
6081 		ret = 0;
6082 	} else if (property == adev->mode_info.abm_level_property) {
6083 		*val = dm_state->abm_level;
6084 		ret = 0;
6085 	}
6086 
6087 	return ret;
6088 }
6089 
6090 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6091 {
6092 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6093 
6094 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6095 }
6096 
6097 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6098 {
6099 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6100 	const struct dc_link *link = aconnector->dc_link;
6101 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6102 	struct amdgpu_display_manager *dm = &adev->dm;
6103 
6104 	/*
6105 	 * Call only if mst_mgr was initialized before, since it's not done
6106 	 * for all connector types.
6107 	 */
6108 	if (aconnector->mst_mgr.dev)
6109 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6110 
6111 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6112 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6113 
6114 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6115 	    link->type != dc_connection_none &&
6116 	    dm->backlight_dev) {
6117 		backlight_device_unregister(dm->backlight_dev);
6118 		dm->backlight_dev = NULL;
6119 	}
6120 #endif
6121 
6122 	if (aconnector->dc_em_sink)
6123 		dc_sink_release(aconnector->dc_em_sink);
6124 	aconnector->dc_em_sink = NULL;
6125 	if (aconnector->dc_sink)
6126 		dc_sink_release(aconnector->dc_sink);
6127 	aconnector->dc_sink = NULL;
6128 
6129 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6130 	drm_connector_unregister(connector);
6131 	drm_connector_cleanup(connector);
6132 	if (aconnector->i2c) {
6133 		i2c_del_adapter(&aconnector->i2c->base);
6134 		kfree(aconnector->i2c);
6135 	}
6136 	kfree(aconnector->dm_dp_aux.aux.name);
6137 
6138 	kfree(connector);
6139 }
6140 
6141 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6142 {
6143 	struct dm_connector_state *state =
6144 		to_dm_connector_state(connector->state);
6145 
6146 	if (connector->state)
6147 		__drm_atomic_helper_connector_destroy_state(connector->state);
6148 
6149 	kfree(state);
6150 
6151 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6152 
6153 	if (state) {
6154 		state->scaling = RMX_OFF;
6155 		state->underscan_enable = false;
6156 		state->underscan_hborder = 0;
6157 		state->underscan_vborder = 0;
6158 		state->base.max_requested_bpc = 8;
6159 		state->vcpi_slots = 0;
6160 		state->pbn = 0;
6161 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6162 			state->abm_level = amdgpu_dm_abm_level;
6163 
6164 		__drm_atomic_helper_connector_reset(connector, &state->base);
6165 	}
6166 }
6167 
6168 struct drm_connector_state *
6169 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6170 {
6171 	struct dm_connector_state *state =
6172 		to_dm_connector_state(connector->state);
6173 
6174 	struct dm_connector_state *new_state =
6175 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6176 
6177 	if (!new_state)
6178 		return NULL;
6179 
6180 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6181 
6182 	new_state->freesync_capable = state->freesync_capable;
6183 	new_state->abm_level = state->abm_level;
6184 	new_state->scaling = state->scaling;
6185 	new_state->underscan_enable = state->underscan_enable;
6186 	new_state->underscan_hborder = state->underscan_hborder;
6187 	new_state->underscan_vborder = state->underscan_vborder;
6188 	new_state->vcpi_slots = state->vcpi_slots;
6189 	new_state->pbn = state->pbn;
6190 	return &new_state->base;
6191 }
6192 
6193 static int
6194 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6195 {
6196 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6197 		to_amdgpu_dm_connector(connector);
6198 	int r;
6199 
6200 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6201 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6202 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6203 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6204 		if (r)
6205 			return r;
6206 	}
6207 
6208 #if defined(CONFIG_DEBUG_FS)
6209 	connector_debugfs_init(amdgpu_dm_connector);
6210 #endif
6211 
6212 	return 0;
6213 }
6214 
6215 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6216 	.reset = amdgpu_dm_connector_funcs_reset,
6217 	.detect = amdgpu_dm_connector_detect,
6218 	.fill_modes = drm_helper_probe_single_connector_modes,
6219 	.destroy = amdgpu_dm_connector_destroy,
6220 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6221 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6222 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6223 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6224 	.late_register = amdgpu_dm_connector_late_register,
6225 	.early_unregister = amdgpu_dm_connector_unregister
6226 };
6227 
6228 static int get_modes(struct drm_connector *connector)
6229 {
6230 	return amdgpu_dm_connector_get_modes(connector);
6231 }
6232 
6233 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6234 {
6235 	struct dc_sink_init_data init_params = {
6236 			.link = aconnector->dc_link,
6237 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6238 	};
6239 	struct edid *edid;
6240 
6241 	if (!aconnector->base.edid_blob_ptr) {
6242 		DRM_ERROR("No EDID firmware found on connector %s, forcing to OFF!\n",
6243 				aconnector->base.name);
6244 
6245 		aconnector->base.force = DRM_FORCE_OFF;
6246 		aconnector->base.override_edid = false;
6247 		return;
6248 	}
6249 
6250 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6251 
6252 	aconnector->edid = edid;
6253 
6254 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6255 		aconnector->dc_link,
6256 		(uint8_t *)edid,
6257 		(edid->extensions + 1) * EDID_LENGTH,
6258 		&init_params);
6259 
6260 	if (aconnector->base.force == DRM_FORCE_ON) {
6261 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6262 		aconnector->dc_link->local_sink :
6263 		aconnector->dc_em_sink;
6264 		dc_sink_retain(aconnector->dc_sink);
6265 	}
6266 }
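/*
 * Size note: an EDID is a 128-byte base block (EDID_LENGTH) plus
 * edid->extensions further 128-byte blocks, hence the
 * (edid->extensions + 1) * EDID_LENGTH length passed above.
 */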
6267 
6268 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6269 {
6270 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6271 
6272 	/*
6273 	 * In case of a headless boot with force-on for a DP-managed connector,
6274 	 * these settings have to be != 0 to get an initial modeset.
6275 	 */
6276 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6277 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6278 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6279 	}
6280 
6282 	aconnector->base.override_edid = true;
6283 	create_eml_sink(aconnector);
6284 }
6285 
6286 static struct dc_stream_state *
6287 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6288 				const struct drm_display_mode *drm_mode,
6289 				const struct dm_connector_state *dm_state,
6290 				const struct dc_stream_state *old_stream)
6291 {
6292 	struct drm_connector *connector = &aconnector->base;
6293 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6294 	struct dc_stream_state *stream;
6295 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6296 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6297 	enum dc_status dc_result = DC_OK;
6298 
6299 	do {
6300 		stream = create_stream_for_sink(aconnector, drm_mode,
6301 						dm_state, old_stream,
6302 						requested_bpc);
6303 		if (stream == NULL) {
6304 			DRM_ERROR("Failed to create stream for sink!\n");
6305 			break;
6306 		}
6307 
6308 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6309 
6310 		if (dc_result != DC_OK) {
6311 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6312 				      drm_mode->hdisplay,
6313 				      drm_mode->vdisplay,
6314 				      drm_mode->clock,
6315 				      dc_result,
6316 				      dc_status_to_str(dc_result));
6317 
6318 			dc_stream_release(stream);
6319 			stream = NULL;
6320 			requested_bpc -= 2; /* lower bpc to retry validation */
6321 		}
6322 
6323 	} while (stream == NULL && requested_bpc >= 6);
6324 
6325 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6326 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6327 
6328 		aconnector->force_yuv420_output = true;
6329 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6330 						dm_state, old_stream);
6331 		aconnector->force_yuv420_output = false;
6332 	}
6333 
6334 	return stream;
6335 }
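/*
 * Retry ladder above, e.g. with the default max_requested_bpc of 8 the
 * attempts are 8 -> 6; with 10 they are 10 -> 8 -> 6. If even 6 bpc
 * fails with DC_FAIL_ENC_VALIDATE, one final attempt is made with
 * YCbCr 4:2:0 forced, which roughly halves the required TMDS bandwidth.
 */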
6336 
6337 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6338 				   struct drm_display_mode *mode)
6339 {
6340 	int result = MODE_ERROR;
6341 	struct dc_sink *dc_sink;
6342 	/* TODO: Unhardcode stream count */
6343 	struct dc_stream_state *stream;
6344 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6345 
6346 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6347 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6348 		return result;
6349 
6350 	/*
6351 	 * Only run this the first time mode_valid is called, to initialize
6352 	 * EDID management.
6353 	 */
6354 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6355 		!aconnector->dc_em_sink)
6356 		handle_edid_mgmt(aconnector);
6357 
6358 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6359 
6360 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6361 				aconnector->base.force != DRM_FORCE_ON) {
6362 		DRM_ERROR("dc_sink is NULL!\n");
6363 		goto fail;
6364 	}
6365 
6366 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6367 	if (stream) {
6368 		dc_stream_release(stream);
6369 		result = MODE_OK;
6370 	}
6371 
6372 fail:
6373 	/* TODO: error handling */
6374 	return result;
6375 }
6376 
6377 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6378 				struct dc_info_packet *out)
6379 {
6380 	struct hdmi_drm_infoframe frame;
6381 	unsigned char buf[30]; /* 26 + 4 */
6382 	ssize_t len;
6383 	int ret, i;
6384 
6385 	memset(out, 0, sizeof(*out));
6386 
6387 	if (!state->hdr_output_metadata)
6388 		return 0;
6389 
6390 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6391 	if (ret)
6392 		return ret;
6393 
6394 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6395 	if (len < 0)
6396 		return (int)len;
6397 
6398 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6399 	if (len != 30)
6400 		return -EINVAL;
6401 
6402 	/* Prepare the infopacket for DC. */
6403 	switch (state->connector->connector_type) {
6404 	case DRM_MODE_CONNECTOR_HDMIA:
6405 		out->hb0 = 0x87; /* type */
6406 		out->hb1 = 0x01; /* version */
6407 		out->hb2 = 0x1A; /* length */
6408 		out->sb[0] = buf[3]; /* checksum */
6409 		i = 1;
6410 		break;
6411 
6412 	case DRM_MODE_CONNECTOR_DisplayPort:
6413 	case DRM_MODE_CONNECTOR_eDP:
6414 		out->hb0 = 0x00; /* sdp id, zero */
6415 		out->hb1 = 0x87; /* type */
6416 		out->hb2 = 0x1D; /* payload len - 1 */
6417 		out->hb3 = (0x13 << 2); /* sdp version */
6418 		out->sb[0] = 0x01; /* version */
6419 		out->sb[1] = 0x1A; /* length */
6420 		i = 2;
6421 		break;
6422 
6423 	default:
6424 		return -EINVAL;
6425 	}
6426 
6427 	memcpy(&out->sb[i], &buf[4], 26);
6428 	out->valid = true;
6429 
6430 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6431 		       sizeof(out->sb), false);
6432 
6433 	return 0;
6434 }
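/*
 * Resulting layout, for reference: for HDMI this is the Dynamic Range
 * and Mastering InfoFrame (type 0x87, version 1, length 26) with the
 * checksum in sb[0]; for DP the same 26 payload bytes are carried in an
 * SDP with header bytes 0x00/0x87/0x1D and SDP version 0x13 in hb3.
 */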
6435 
6436 static int
6437 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6438 				 struct drm_atomic_state *state)
6439 {
6440 	struct drm_connector_state *new_con_state =
6441 		drm_atomic_get_new_connector_state(state, conn);
6442 	struct drm_connector_state *old_con_state =
6443 		drm_atomic_get_old_connector_state(state, conn);
6444 	struct drm_crtc *crtc = new_con_state->crtc;
6445 	struct drm_crtc_state *new_crtc_state;
6446 	int ret;
6447 
6448 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6449 
6450 	if (!crtc)
6451 		return 0;
6452 
6453 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6454 		struct dc_info_packet hdr_infopacket;
6455 
6456 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6457 		if (ret)
6458 			return ret;
6459 
6460 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6461 		if (IS_ERR(new_crtc_state))
6462 			return PTR_ERR(new_crtc_state);
6463 
6464 		/*
6465 		 * DC considers the stream backends changed if the
6466 		 * static metadata changes. Forcing the modeset also
6467 		 * gives a simple way for userspace to switch from
6468 		 * 8bpc to 10bpc when setting the metadata to enter
6469 		 * or exit HDR.
6470 		 *
6471 		 * Changing the static metadata after it's been
6472 		 * set is permissible, however. So only force a
6473 		 * modeset if we're entering or exiting HDR.
6474 		 */
6475 		new_crtc_state->mode_changed =
6476 			!old_con_state->hdr_output_metadata ||
6477 			!new_con_state->hdr_output_metadata;
6478 	}
6479 
6480 	return 0;
6481 }
6482 
6483 static const struct drm_connector_helper_funcs
6484 amdgpu_dm_connector_helper_funcs = {
6485 	/*
6486 	 * When hotplugging a second, larger display in fbcon mode, the higher
6487 	 * resolution modes are filtered out by drm_mode_validate_size() and go
6488 	 * missing after the user starts lightdm. So we need to rebuild the mode
6489 	 * list in the get_modes callback, not just return the mode count.
6490 	 */
6491 	.get_modes = get_modes,
6492 	.mode_valid = amdgpu_dm_connector_mode_valid,
6493 	.atomic_check = amdgpu_dm_connector_atomic_check,
6494 };
6495 
6496 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6497 {
6498 }
6499 
6500 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6501 {
6502 	struct drm_atomic_state *state = new_crtc_state->state;
6503 	struct drm_plane *plane;
6504 	int num_active = 0;
6505 
6506 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6507 		struct drm_plane_state *new_plane_state;
6508 
6509 		/* Cursor planes are "fake". */
6510 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6511 			continue;
6512 
6513 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6514 
6515 		if (!new_plane_state) {
6516 			/*
6517 			 * The plane is enabled on the CRTC and hasn't changed
6518 			 * state. This means that it previously passed
6519 			 * validation and is therefore enabled.
6520 			 */
6521 			num_active += 1;
6522 			continue;
6523 		}
6524 
6525 		/* We need a framebuffer to be considered enabled. */
6526 		num_active += (new_plane_state->fb != NULL);
6527 	}
6528 
6529 	return num_active;
6530 }
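/*
 * Planes untouched by the commit have no new state in the atomic state,
 * so they count as active by virtue of having previously passed
 * validation; only planes with new state are judged by whether a
 * framebuffer is attached.
 */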
6531 
6532 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6533 					 struct drm_crtc_state *new_crtc_state)
6534 {
6535 	struct dm_crtc_state *dm_new_crtc_state =
6536 		to_dm_crtc_state(new_crtc_state);
6537 
6538 	dm_new_crtc_state->active_planes = 0;
6539 
6540 	if (!dm_new_crtc_state->stream)
6541 		return;
6542 
6543 	dm_new_crtc_state->active_planes =
6544 		count_crtc_active_planes(new_crtc_state);
6545 }
6546 
6547 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6548 				       struct drm_atomic_state *state)
6549 {
6550 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6551 									  crtc);
6552 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6553 	struct dc *dc = adev->dm.dc;
6554 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6555 	int ret = -EINVAL;
6556 
6557 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6558 
6559 	dm_update_crtc_active_planes(crtc, crtc_state);
6560 
6561 	if (unlikely(!dm_crtc_state->stream &&
6562 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6563 		WARN_ON(1);
6564 		return ret;
6565 	}
6566 
6567 	/*
6568 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6569 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6570 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6571 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6572 	 */
6573 	if (crtc_state->enable &&
6574 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6575 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6576 		return -EINVAL;
6577 	}
6578 
6579 	/* In some use cases, like reset, no stream is attached */
6580 	if (!dm_crtc_state->stream)
6581 		return 0;
6582 
6583 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6584 		return 0;
6585 
6586 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6587 	return ret;
6588 }
6589 
6590 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6591 				      const struct drm_display_mode *mode,
6592 				      struct drm_display_mode *adjusted_mode)
6593 {
6594 	return true;
6595 }
6596 
6597 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6598 	.disable = dm_crtc_helper_disable,
6599 	.atomic_check = dm_crtc_helper_atomic_check,
6600 	.mode_fixup = dm_crtc_helper_mode_fixup,
6601 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6602 };
6603 
6604 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6605 {
6606 
6607 }
6608 
6609 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6610 {
6611 	switch (display_color_depth) {
6612 	case COLOR_DEPTH_666:
6613 		return 6;
6614 	case COLOR_DEPTH_888:
6615 		return 8;
6616 	case COLOR_DEPTH_101010:
6617 		return 10;
6618 	case COLOR_DEPTH_121212:
6619 		return 12;
6620 	case COLOR_DEPTH_141414:
6621 		return 14;
6622 	case COLOR_DEPTH_161616:
6623 		return 16;
6624 	default:
6625 		break;
6626 	}
6627 	return 0;
6628 }
6629 
6630 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6631 					  struct drm_crtc_state *crtc_state,
6632 					  struct drm_connector_state *conn_state)
6633 {
6634 	struct drm_atomic_state *state = crtc_state->state;
6635 	struct drm_connector *connector = conn_state->connector;
6636 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6637 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6638 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6639 	struct drm_dp_mst_topology_mgr *mst_mgr;
6640 	struct drm_dp_mst_port *mst_port;
6641 	enum dc_color_depth color_depth;
6642 	int clock, bpp = 0;
6643 	bool is_y420 = false;
6644 
6645 	if (!aconnector->port || !aconnector->dc_sink)
6646 		return 0;
6647 
6648 	mst_port = aconnector->port;
6649 	mst_mgr = &aconnector->mst_port->mst_mgr;
6650 
6651 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6652 		return 0;
6653 
6654 	if (!state->duplicated) {
6655 		int max_bpc = conn_state->max_requested_bpc;
6656 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6657 				aconnector->force_yuv420_output;
6658 		color_depth = convert_color_depth_from_display_info(connector,
6659 								    is_y420,
6660 								    max_bpc);
6661 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6662 		clock = adjusted_mode->clock;
6663 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6664 	}
6665 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6666 									   mst_mgr,
6667 									   mst_port,
6668 									   dm_new_connector_state->pbn,
6669 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6670 	if (dm_new_connector_state->vcpi_slots < 0) {
6671 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6672 		return dm_new_connector_state->vcpi_slots;
6673 	}
6674 	return 0;
6675 }
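/*
 * PBN (Payload Bandwidth Number) is the MST bandwidth unit, with one
 * PBN corresponding to 54/64 MBps; drm_dp_calc_pbn_mode() derives it
 * from the pixel clock and bpp, and the VCPI slot count found above is
 * what the MST payload table allocation is based on.
 */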
6676 
6677 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6678 	.disable = dm_encoder_helper_disable,
6679 	.atomic_check = dm_encoder_helper_atomic_check
6680 };
6681 
6682 #if defined(CONFIG_DRM_AMD_DC_DCN)
6683 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6684 					    struct dc_state *dc_state)
6685 {
6686 	struct dc_stream_state *stream = NULL;
6687 	struct drm_connector *connector;
6688 	struct drm_connector_state *new_con_state;
6689 	struct amdgpu_dm_connector *aconnector;
6690 	struct dm_connector_state *dm_conn_state;
6691 	int i, j, clock, bpp;
6692 	int vcpi, pbn_div, pbn = 0;
6693 
6694 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6695 
6696 		aconnector = to_amdgpu_dm_connector(connector);
6697 
6698 		if (!aconnector->port)
6699 			continue;
6700 
6701 		if (!new_con_state || !new_con_state->crtc)
6702 			continue;
6703 
6704 		dm_conn_state = to_dm_connector_state(new_con_state);
6705 
6706 		for (j = 0; j < dc_state->stream_count; j++) {
6707 			stream = dc_state->streams[j];
6708 			if (!stream)
6709 				continue;
6710 
6711 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6712 				break;
6713 
6714 			stream = NULL;
6715 		}
6716 
6717 		if (!stream)
6718 			continue;
6719 
6720 		if (stream->timing.flags.DSC != 1) {
6721 			drm_dp_mst_atomic_enable_dsc(state,
6722 						     aconnector->port,
6723 						     dm_conn_state->pbn,
6724 						     0,
6725 						     false);
6726 			continue;
6727 		}
6728 
6729 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6730 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6731 		clock = stream->timing.pix_clk_100hz / 10;
6732 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6733 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6734 						    aconnector->port,
6735 						    pbn, pbn_div,
6736 						    true);
6737 		if (vcpi < 0)
6738 			return vcpi;
6739 
6740 		dm_conn_state->pbn = pbn;
6741 		dm_conn_state->vcpi_slots = vcpi;
6742 	}
6743 	return 0;
6744 }
6745 #endif
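/*
 * When DSC is enabled, stream->timing.dsc_cfg.bits_per_pixel is a
 * fixed-point value in 1/16-bpp units, which is why the PBN calculation
 * above passes dsc = true so that drm_dp_calc_pbn_mode() applies the
 * extra divide-by-16.
 */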
6746 
6747 static void dm_drm_plane_reset(struct drm_plane *plane)
6748 {
6749 	struct dm_plane_state *amdgpu_state = NULL;
6750 
6751 	if (plane->state)
6752 		plane->funcs->atomic_destroy_state(plane, plane->state);
6753 
6754 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6755 	WARN_ON(amdgpu_state == NULL);
6756 
6757 	if (amdgpu_state)
6758 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6759 }
6760 
6761 static struct drm_plane_state *
6762 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6763 {
6764 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6765 
6766 	old_dm_plane_state = to_dm_plane_state(plane->state);
6767 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6768 	if (!dm_plane_state)
6769 		return NULL;
6770 
6771 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6772 
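	/*
	 * The helper above does a shallow copy, so the new state shares the
	 * old state's dc_state pointer; take an extra reference to keep the
	 * DC plane state alive for the lifetime of this DRM state.
	 */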
6773 	if (old_dm_plane_state->dc_state) {
6774 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6775 		dc_plane_state_retain(dm_plane_state->dc_state);
6776 	}
6777 
6778 	return &dm_plane_state->base;
6779 }
6780 
6781 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6782 				struct drm_plane_state *state)
6783 {
6784 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6785 
6786 	if (dm_plane_state->dc_state)
6787 		dc_plane_state_release(dm_plane_state->dc_state);
6788 
6789 	drm_atomic_helper_plane_destroy_state(plane, state);
6790 }
6791 
6792 static const struct drm_plane_funcs dm_plane_funcs = {
6793 	.update_plane	= drm_atomic_helper_update_plane,
6794 	.disable_plane	= drm_atomic_helper_disable_plane,
6795 	.destroy	= drm_primary_helper_destroy,
6796 	.reset = dm_drm_plane_reset,
6797 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6798 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6799 	.format_mod_supported = dm_plane_format_mod_supported,
6800 };
6801 
6802 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6803 				      struct drm_plane_state *new_state)
6804 {
6805 	struct amdgpu_framebuffer *afb;
6806 	struct drm_gem_object *obj;
6807 	struct amdgpu_device *adev;
6808 	struct amdgpu_bo *rbo;
6809 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6810 	struct list_head list;
6811 	struct ttm_validate_buffer tv;
6812 	struct ww_acquire_ctx ticket;
6813 	uint32_t domain;
6814 	int r;
6815 
6816 	if (!new_state->fb) {
6817 		DRM_DEBUG_KMS("No FB bound\n");
6818 		return 0;
6819 	}
6820 
6821 	afb = to_amdgpu_framebuffer(new_state->fb);
6822 	obj = new_state->fb->obj[0];
6823 	rbo = gem_to_amdgpu_bo(obj);
6824 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6825 	INIT_LIST_HEAD(&list);
6826 
6827 	tv.bo = &rbo->tbo;
6828 	tv.num_shared = 1;
6829 	list_add(&tv.head, &list);
6830 
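	/*
	 * Reserve the BO, pin it into a scanout-capable domain and bind it
	 * into GART so it has a fixed GPU address for scanout; the
	 * reservation is backed off again once the pin is held.
	 */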
6831 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6832 	if (r) {
6833 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6834 		return r;
6835 	}
6836 
6837 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6838 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6839 	else
6840 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6841 
6842 	r = amdgpu_bo_pin(rbo, domain);
6843 	if (unlikely(r != 0)) {
6844 		if (r != -ERESTARTSYS)
6845 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6846 		ttm_eu_backoff_reservation(&ticket, &list);
6847 		return r;
6848 	}
6849 
6850 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6851 	if (unlikely(r != 0)) {
6852 		amdgpu_bo_unpin(rbo);
6853 		ttm_eu_backoff_reservation(&ticket, &list);
6854 		DRM_ERROR("%p bind failed\n", rbo);
6855 		return r;
6856 	}
6857 
6858 	ttm_eu_backoff_reservation(&ticket, &list);
6859 
6860 	afb->address = amdgpu_bo_gpu_offset(rbo);
6861 
6862 	amdgpu_bo_ref(rbo);
6863 
6864 	/*
6865 	 * We don't do surface updates on planes that have been newly created,
6866 	 * but we also don't have the afb->address during atomic check.
6867 	 *
6868 	 * Fill in buffer attributes depending on the address here, but only on
6869 	 * newly created planes since they're not being used by DC yet and this
6870 	 * won't modify global state.
6871 	 */
6872 	dm_plane_state_old = to_dm_plane_state(plane->state);
6873 	dm_plane_state_new = to_dm_plane_state(new_state);
6874 
6875 	if (dm_plane_state_new->dc_state &&
6876 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6877 		struct dc_plane_state *plane_state =
6878 			dm_plane_state_new->dc_state;
6879 		bool force_disable_dcc = !plane_state->dcc.enable;
6880 
6881 		fill_plane_buffer_attributes(
6882 			adev, afb, plane_state->format, plane_state->rotation,
6883 			afb->tiling_flags,
6884 			&plane_state->tiling_info, &plane_state->plane_size,
6885 			&plane_state->dcc, &plane_state->address,
6886 			afb->tmz_surface, force_disable_dcc);
6887 	}
6888 
6889 	return 0;
6890 }
6891 
6892 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6893 				       struct drm_plane_state *old_state)
6894 {
6895 	struct amdgpu_bo *rbo;
6896 	int r;
6897 
6898 	if (!old_state->fb)
6899 		return;
6900 
6901 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6902 	r = amdgpu_bo_reserve(rbo, false);
6903 	if (unlikely(r)) {
6904 		DRM_ERROR("failed to reserve rbo before unpin\n");
6905 		return;
6906 	}
6907 
6908 	amdgpu_bo_unpin(rbo);
6909 	amdgpu_bo_unreserve(rbo);
6910 	amdgpu_bo_unref(&rbo);
6911 }
6912 
6913 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6914 				       struct drm_crtc_state *new_crtc_state)
6915 {
6916 	struct drm_framebuffer *fb = state->fb;
6917 	int min_downscale, max_upscale;
6918 	int min_scale = 0;
6919 	int max_scale = INT_MAX;
6920 
6921 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6922 	if (fb && state->crtc) {
6923 		/* Validate viewport to cover the case when only the position changes */
6924 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6925 			int viewport_width = state->crtc_w;
6926 			int viewport_height = state->crtc_h;
6927 
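			/*
			 * Clip the viewport against the CRTC bounds; e.g. a
			 * 100 pixel wide plane at crtc_x == -30 leaves a 70
			 * pixel visible width, while fully off-screen planes
			 * go negative and are rejected below.
			 */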
6928 			if (state->crtc_x < 0)
6929 				viewport_width += state->crtc_x;
6930 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6931 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6932 
6933 			if (state->crtc_y < 0)
6934 				viewport_height += state->crtc_y;
6935 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6936 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6937 
6938 			if (viewport_width < 0 || viewport_height < 0) {
6939 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6940 				return -EINVAL;
6941 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* width is doubled because of pipe split */
6942 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6943 				return -EINVAL;
6944 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
6945 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6946 				return -EINVAL;
6947 			}
6948 
6949 		}
6950 
6951 		/* Get min/max allowed scaling factors from plane caps. */
6952 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6953 					     &min_downscale, &max_upscale);
6954 		/*
6955 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6956 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6957 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6958 		 */
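		/*
		 * Worked example with assumed caps: max_upscale == 16000
		 * (16x) gives min_scale == (1000 << 16) / 16000 == 4096
		 * (0.0625 in 16.16); min_downscale == 250 (1/4x) gives
		 * max_scale == 4.0 in 16.16.
		 */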
6959 		min_scale = (1000 << 16) / max_upscale;
6960 		max_scale = (1000 << 16) / min_downscale;
6961 	}
6962 
6963 	return drm_atomic_helper_check_plane_state(
6964 		state, new_crtc_state, min_scale, max_scale, true, true);
6965 }
6966 
6967 static int dm_plane_atomic_check(struct drm_plane *plane,
6968 				 struct drm_atomic_state *state)
6969 {
6970 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6971 										 plane);
6972 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6973 	struct dc *dc = adev->dm.dc;
6974 	struct dm_plane_state *dm_plane_state;
6975 	struct dc_scaling_info scaling_info;
6976 	struct drm_crtc_state *new_crtc_state;
6977 	int ret;
6978 
6979 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6980 
6981 	dm_plane_state = to_dm_plane_state(new_plane_state);
6982 
6983 	if (!dm_plane_state->dc_state)
6984 		return 0;
6985 
6986 	new_crtc_state =
6987 		drm_atomic_get_new_crtc_state(state,
6988 					      new_plane_state->crtc);
6989 	if (!new_crtc_state)
6990 		return -EINVAL;
6991 
6992 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6993 	if (ret)
6994 		return ret;
6995 
6996 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6997 	if (ret)
6998 		return ret;
6999 
7000 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7001 		return 0;
7002 
7003 	return -EINVAL;
7004 }
7005 
7006 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7007 				       struct drm_atomic_state *state)
7008 {
7009 	/* Only support async updates on cursor planes. */
7010 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7011 		return -EINVAL;
7012 
7013 	return 0;
7014 }
7015 
7016 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7017 					 struct drm_atomic_state *state)
7018 {
7019 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7020 									   plane);
7021 	struct drm_plane_state *old_state =
7022 		drm_atomic_get_old_plane_state(state, plane);
7023 
7024 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7025 
7026 	swap(plane->state->fb, new_state->fb);
7027 
7028 	plane->state->src_x = new_state->src_x;
7029 	plane->state->src_y = new_state->src_y;
7030 	plane->state->src_w = new_state->src_w;
7031 	plane->state->src_h = new_state->src_h;
7032 	plane->state->crtc_x = new_state->crtc_x;
7033 	plane->state->crtc_y = new_state->crtc_y;
7034 	plane->state->crtc_w = new_state->crtc_w;
7035 	plane->state->crtc_h = new_state->crtc_h;
7036 
7037 	handle_cursor_update(plane, old_state);
7038 }
7039 
7040 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7041 	.prepare_fb = dm_plane_helper_prepare_fb,
7042 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7043 	.atomic_check = dm_plane_atomic_check,
7044 	.atomic_async_check = dm_plane_atomic_async_check,
7045 	.atomic_async_update = dm_plane_atomic_async_update
7046 };
7047 
7048 /*
7049  * TODO: these are currently initialized to RGB formats only.
7050  * For future use cases we should either initialize them dynamically based on
7051  * plane capabilities, or initialize this array to all formats, so the
7052  * internal DRM check will succeed, and let DC implement the proper check.
7053  */
7054 static const uint32_t rgb_formats[] = {
7055 	DRM_FORMAT_XRGB8888,
7056 	DRM_FORMAT_ARGB8888,
7057 	DRM_FORMAT_RGBA8888,
7058 	DRM_FORMAT_XRGB2101010,
7059 	DRM_FORMAT_XBGR2101010,
7060 	DRM_FORMAT_ARGB2101010,
7061 	DRM_FORMAT_ABGR2101010,
7062 	DRM_FORMAT_XBGR8888,
7063 	DRM_FORMAT_ABGR8888,
7064 	DRM_FORMAT_RGB565,
7065 };
7066 
7067 static const uint32_t overlay_formats[] = {
7068 	DRM_FORMAT_XRGB8888,
7069 	DRM_FORMAT_ARGB8888,
7070 	DRM_FORMAT_RGBA8888,
7071 	DRM_FORMAT_XBGR8888,
7072 	DRM_FORMAT_ABGR8888,
7073 	DRM_FORMAT_RGB565
7074 };
7075 
7076 static const u32 cursor_formats[] = {
7077 	DRM_FORMAT_ARGB8888
7078 };
7079 
7080 static int get_plane_formats(const struct drm_plane *plane,
7081 			     const struct dc_plane_cap *plane_cap,
7082 			     uint32_t *formats, int max_formats)
7083 {
7084 	int i, num_formats = 0;
7085 
7086 	/*
7087 	 * TODO: Query support for each group of formats directly from
7088 	 * DC plane caps. This will require adding more formats to the
7089 	 * caps list.
7090 	 */
7091 
7092 	switch (plane->type) {
7093 	case DRM_PLANE_TYPE_PRIMARY:
7094 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7095 			if (num_formats >= max_formats)
7096 				break;
7097 
7098 			formats[num_formats++] = rgb_formats[i];
7099 		}
7100 
7101 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7102 			formats[num_formats++] = DRM_FORMAT_NV12;
7103 		if (plane_cap && plane_cap->pixel_format_support.p010)
7104 			formats[num_formats++] = DRM_FORMAT_P010;
7105 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7106 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7107 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7108 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7109 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7110 		}
7111 		break;
7112 
7113 	case DRM_PLANE_TYPE_OVERLAY:
7114 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7115 			if (num_formats >= max_formats)
7116 				break;
7117 
7118 			formats[num_formats++] = overlay_formats[i];
7119 		}
7120 		break;
7121 
7122 	case DRM_PLANE_TYPE_CURSOR:
7123 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7124 			if (num_formats >= max_formats)
7125 				break;
7126 
7127 			formats[num_formats++] = cursor_formats[i];
7128 		}
7129 		break;
7130 	}
7131 
7132 	return num_formats;
7133 }
7134 
7135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7136 				struct drm_plane *plane,
7137 				unsigned long possible_crtcs,
7138 				const struct dc_plane_cap *plane_cap)
7139 {
7140 	uint32_t formats[32];
7141 	int num_formats;
7142 	int res = -EPERM;
7143 	unsigned int supported_rotations;
7144 	uint64_t *modifiers = NULL;
7145 
7146 	num_formats = get_plane_formats(plane, plane_cap, formats,
7147 					ARRAY_SIZE(formats));
7148 
7149 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7150 	if (res)
7151 		return res;
7152 
7153 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7154 				       &dm_plane_funcs, formats, num_formats,
7155 				       modifiers, plane->type, NULL);
7156 	kfree(modifiers);
7157 	if (res)
7158 		return res;
7159 
7160 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7161 	    plane_cap && plane_cap->per_pixel_alpha) {
7162 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7163 					  BIT(DRM_MODE_BLEND_PREMULTI);
7164 
7165 		drm_plane_create_alpha_property(plane);
7166 		drm_plane_create_blend_mode_property(plane, blend_caps);
7167 	}
7168 
7169 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7170 	    plane_cap &&
7171 	    (plane_cap->pixel_format_support.nv12 ||
7172 	     plane_cap->pixel_format_support.p010)) {
7173 		/* This only affects YUV formats. */
7174 		drm_plane_create_color_properties(
7175 			plane,
7176 			BIT(DRM_COLOR_YCBCR_BT601) |
7177 			BIT(DRM_COLOR_YCBCR_BT709) |
7178 			BIT(DRM_COLOR_YCBCR_BT2020),
7179 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7180 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7181 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7182 	}
7183 
7184 	supported_rotations =
7185 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7186 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7187 
7188 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7189 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7190 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7191 						   supported_rotations);
7192 
7193 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7194 
7195 	/* Create (reset) the plane state */
7196 	if (plane->funcs->reset)
7197 		plane->funcs->reset(plane);
7198 
7199 	return 0;
7200 }
7201 
7202 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7203 			       struct drm_plane *plane,
7204 			       uint32_t crtc_index)
7205 {
7206 	struct amdgpu_crtc *acrtc = NULL;
7207 	struct drm_plane *cursor_plane;
7208 
7209 	int res = -ENOMEM;
7210 
7211 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7212 	if (!cursor_plane)
7213 		goto fail;
7214 
7215 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7216 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
7217 
7218 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7219 	if (!acrtc)
7220 		goto fail;
7221 
7222 	res = drm_crtc_init_with_planes(
7223 			dm->ddev,
7224 			&acrtc->base,
7225 			plane,
7226 			cursor_plane,
7227 			&amdgpu_dm_crtc_funcs, NULL);
7228 
7229 	if (res)
7230 		goto fail;
7231 
7232 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7233 
7234 	/* Create (reset) the plane state */
7235 	if (acrtc->base.funcs->reset)
7236 		acrtc->base.funcs->reset(&acrtc->base);
7237 
7238 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7239 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7240 
7241 	acrtc->crtc_id = crtc_index;
7242 	acrtc->base.enabled = false;
7243 	acrtc->otg_inst = -1;
7244 
7245 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7246 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7247 				   true, MAX_COLOR_LUT_ENTRIES);
7248 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7249 
7250 	return 0;
7251 
7252 fail:
7253 	kfree(acrtc);
7254 	kfree(cursor_plane);
7255 	return res;
7256 }
7257 
7259 static int to_drm_connector_type(enum signal_type st)
7260 {
7261 	switch (st) {
7262 	case SIGNAL_TYPE_HDMI_TYPE_A:
7263 		return DRM_MODE_CONNECTOR_HDMIA;
7264 	case SIGNAL_TYPE_EDP:
7265 		return DRM_MODE_CONNECTOR_eDP;
7266 	case SIGNAL_TYPE_LVDS:
7267 		return DRM_MODE_CONNECTOR_LVDS;
7268 	case SIGNAL_TYPE_RGB:
7269 		return DRM_MODE_CONNECTOR_VGA;
7270 	case SIGNAL_TYPE_DISPLAY_PORT:
7271 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7272 		return DRM_MODE_CONNECTOR_DisplayPort;
7273 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7274 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7275 		return DRM_MODE_CONNECTOR_DVID;
7276 	case SIGNAL_TYPE_VIRTUAL:
7277 		return DRM_MODE_CONNECTOR_VIRTUAL;
7278 
7279 	default:
7280 		return DRM_MODE_CONNECTOR_Unknown;
7281 	}
7282 }
7283 
7284 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7285 {
7286 	struct drm_encoder *encoder;
7287 
7288 	/* There is only one encoder per connector */
7289 	drm_connector_for_each_possible_encoder(connector, encoder)
7290 		return encoder;
7291 
7292 	return NULL;
7293 }
7294 
7295 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7296 {
7297 	struct drm_encoder *encoder;
7298 	struct amdgpu_encoder *amdgpu_encoder;
7299 
7300 	encoder = amdgpu_dm_connector_to_encoder(connector);
7301 
7302 	if (encoder == NULL)
7303 		return;
7304 
7305 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7306 
7307 	amdgpu_encoder->native_mode.clock = 0;
7308 
7309 	if (!list_empty(&connector->probed_modes)) {
7310 		struct drm_display_mode *preferred_mode = NULL;
7311 
7312 		list_for_each_entry(preferred_mode,
7313 				    &connector->probed_modes,
7314 				    head) {
7315 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
7316 				amdgpu_encoder->native_mode = *preferred_mode;
7317 				break;
7318 			}
7319 		}
7321 	}
7322 }
7323 
7324 static struct drm_display_mode *
7325 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7326 			     char *name,
7327 			     int hdisplay, int vdisplay)
7328 {
7329 	struct drm_device *dev = encoder->dev;
7330 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7331 	struct drm_display_mode *mode = NULL;
7332 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7333 
7334 	mode = drm_mode_duplicate(dev, native_mode);
7335 
7336 	if (mode == NULL)
7337 		return NULL;
7338 
7339 	mode->hdisplay = hdisplay;
7340 	mode->vdisplay = vdisplay;
7341 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7342 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7343 
7344 	return mode;
7346 }
7347 
7348 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7349 						 struct drm_connector *connector)
7350 {
7351 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7352 	struct drm_display_mode *mode = NULL;
7353 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7354 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7355 				to_amdgpu_dm_connector(connector);
7356 	int i;
7357 	int n;
7358 	struct mode_size {
7359 		char name[DRM_DISPLAY_MODE_LEN];
7360 		int w;
7361 		int h;
7362 	} common_modes[] = {
7363 		{  "640x480",  640,  480},
7364 		{  "800x600",  800,  600},
7365 		{ "1024x768", 1024,  768},
7366 		{ "1280x720", 1280,  720},
7367 		{ "1280x800", 1280,  800},
7368 		{"1280x1024", 1280, 1024},
7369 		{ "1440x900", 1440,  900},
7370 		{"1680x1050", 1680, 1050},
7371 		{"1600x1200", 1600, 1200},
7372 		{"1920x1080", 1920, 1080},
7373 		{"1920x1200", 1920, 1200}
7374 	};
7375 
7376 	n = ARRAY_SIZE(common_modes);
7377 
7378 	for (i = 0; i < n; i++) {
7379 		struct drm_display_mode *curmode = NULL;
7380 		bool mode_existed = false;
7381 
7382 		if (common_modes[i].w > native_mode->hdisplay ||
7383 		    common_modes[i].h > native_mode->vdisplay ||
7384 		   (common_modes[i].w == native_mode->hdisplay &&
7385 		    common_modes[i].h == native_mode->vdisplay))
7386 			continue;
7387 
7388 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7389 			if (common_modes[i].w == curmode->hdisplay &&
7390 			    common_modes[i].h == curmode->vdisplay) {
7391 				mode_existed = true;
7392 				break;
7393 			}
7394 		}
7395 
7396 		if (mode_existed)
7397 			continue;
7398 
7399 		mode = amdgpu_dm_create_common_mode(encoder,
7400 				common_modes[i].name, common_modes[i].w,
7401 				common_modes[i].h);
7402 		drm_mode_probed_add(connector, mode);
7403 		amdgpu_dm_connector->num_modes++;
7404 	}
7405 }
7406 
7407 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7408 					      struct edid *edid)
7409 {
7410 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7411 			to_amdgpu_dm_connector(connector);
7412 
7413 	if (edid) {
7414 		/* empty probed_modes */
7415 		INIT_LIST_HEAD(&connector->probed_modes);
7416 		amdgpu_dm_connector->num_modes =
7417 				drm_add_edid_modes(connector, edid);
7418 
7419 		/* Sort the probed modes before calling
7420 		 * amdgpu_dm_get_native_mode(), since an EDID can contain
7421 		 * more than one preferred mode, and modes later in the
7422 		 * probed list may have a higher preferred resolution:
7423 		 * for example, 3840x2160 in the base EDID preferred
7424 		 * timing and 4096x2160 in a later DID extension block.
7426 		 */
7427 		drm_mode_sort(&connector->probed_modes);
7428 		amdgpu_dm_get_native_mode(connector);
7429 
7430 		/* Freesync capabilities are reset by calling
7431 		 * drm_add_edid_modes() and need to be
7432 		 * restored here.
7433 		 */
7434 		amdgpu_dm_update_freesync_caps(connector, edid);
7435 	} else {
7436 		amdgpu_dm_connector->num_modes = 0;
7437 	}
7438 }
7439 
7440 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7441 			      struct drm_display_mode *mode)
7442 {
7443 	struct drm_display_mode *m;
7444 
7445 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7446 		if (drm_mode_equal(m, mode))
7447 			return true;
7448 	}
7449 
7450 	return false;
7451 }
7452 
7453 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7454 {
7455 	const struct drm_display_mode *m;
7456 	struct drm_display_mode *new_mode;
7457 	uint i;
7458 	uint32_t new_modes_count = 0;
7459 
7460 	/* Standard FPS values
7461 	 *
7462 	 * 23.976   - TV/NTSC
7463 	 * 24 	    - Cinema
7464 	 * 25 	    - TV/PAL
7465 	 * 29.97    - TV/NTSC
7466 	 * 30 	    - TV/NTSC
7467 	 * 48 	    - Cinema HFR
7468 	 * 50 	    - TV/PAL
7469 	 * 60 	    - Commonly used
7470 	 * 48,72,96 - Multiples of 24
7471 	 */
7472 	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7473 					 48000, 50000, 60000, 72000, 96000 };
7474 
7475 	/*
7476 	 * Find mode with highest refresh rate with the same resolution
7477 	 * as the preferred mode. Some monitors report a preferred mode
7478 	 * with lower resolution than the highest refresh rate supported.
7479 	 */
7480 
7481 	m = get_highest_refresh_rate_mode(aconnector, true);
7482 	if (!m)
7483 		return 0;
7484 
7485 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7486 		uint64_t target_vtotal, target_vtotal_diff;
7487 		uint64_t num, den;
7488 
7489 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7490 			continue;
7491 
7492 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7493 		    common_rates[i] > aconnector->max_vfreq * 1000)
7494 			continue;
7495 
7496 		num = (unsigned long long)m->clock * 1000 * 1000;
7497 		den = common_rates[i] * (unsigned long long)m->htotal;
7498 		target_vtotal = div_u64(num, den);
7499 		target_vtotal_diff = target_vtotal - m->vtotal;
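		/*
		 * With the pixel clock and htotal fixed, the refresh rate
		 * scales inversely with vtotal; e.g. targeting half the
		 * native rate doubles vtotal, and the extra lines are added
		 * to the vertical front porch below.
		 */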
7500 
7501 		/* Check for illegal modes */
7502 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7503 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7504 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7505 			continue;
7506 
7507 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7508 		if (!new_mode)
7509 			goto out;
7510 
7511 		new_mode->vtotal += (u16)target_vtotal_diff;
7512 		new_mode->vsync_start += (u16)target_vtotal_diff;
7513 		new_mode->vsync_end += (u16)target_vtotal_diff;
7514 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7515 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7516 
7517 		if (!is_duplicate_mode(aconnector, new_mode)) {
7518 			drm_mode_probed_add(&aconnector->base, new_mode);
7519 			new_modes_count += 1;
7520 		} else {
7521 			drm_mode_destroy(aconnector->base.dev, new_mode);
		}
7522 	}
7523  out:
7524 	return new_modes_count;
7525 }
7526 
7527 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7528 						   struct edid *edid)
7529 {
7530 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7531 		to_amdgpu_dm_connector(connector);
7532 
7533 	if (!(amdgpu_freesync_vid_mode && edid))
7534 		return;
7535 
7536 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7537 		amdgpu_dm_connector->num_modes +=
7538 			add_fs_modes(amdgpu_dm_connector);
7539 }
7540 
7541 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7542 {
7543 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7544 			to_amdgpu_dm_connector(connector);
7545 	struct drm_encoder *encoder;
7546 	struct edid *edid = amdgpu_dm_connector->edid;
7547 
7548 	encoder = amdgpu_dm_connector_to_encoder(connector);
7549 
7550 	if (!drm_edid_is_valid(edid)) {
7551 		amdgpu_dm_connector->num_modes =
7552 				drm_add_modes_noedid(connector, 640, 480);
7553 	} else {
7554 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7555 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7556 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7557 	}
7558 	amdgpu_dm_fbc_init(connector);
7559 
7560 	return amdgpu_dm_connector->num_modes;
7561 }
7562 
7563 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7564 				     struct amdgpu_dm_connector *aconnector,
7565 				     int connector_type,
7566 				     struct dc_link *link,
7567 				     int link_index)
7568 {
7569 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7570 
7571 	/*
7572 	 * Some of the properties below require access to state, like bpc.
7573 	 * Allocate some default initial connector state with our reset helper.
7574 	 */
7575 	if (aconnector->base.funcs->reset)
7576 		aconnector->base.funcs->reset(&aconnector->base);
7577 
7578 	aconnector->connector_id = link_index;
7579 	aconnector->dc_link = link;
7580 	aconnector->base.interlace_allowed = false;
7581 	aconnector->base.doublescan_allowed = false;
7582 	aconnector->base.stereo_allowed = false;
7583 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7584 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7585 	aconnector->audio_inst = -1;
7586 	mutex_init(&aconnector->hpd_lock);
7587 
7588 	/*
7589 	 * Configure HPD hot plug support: connector->polled defaults to 0,
7590 	 * which means HPD hot plug is not supported.
7591 	 */
7592 	switch (connector_type) {
7593 	case DRM_MODE_CONNECTOR_HDMIA:
7594 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7595 		aconnector->base.ycbcr_420_allowed =
7596 			link->link_enc->features.hdmi_ycbcr420_supported;
7597 		break;
7598 	case DRM_MODE_CONNECTOR_DisplayPort:
7599 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7600 		aconnector->base.ycbcr_420_allowed =
7601 			link->link_enc->features.dp_ycbcr420_supported;
7602 		break;
7603 	case DRM_MODE_CONNECTOR_DVID:
7604 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7605 		break;
7606 	default:
7607 		break;
7608 	}
7609 
7610 	drm_object_attach_property(&aconnector->base.base,
7611 				dm->ddev->mode_config.scaling_mode_property,
7612 				DRM_MODE_SCALE_NONE);
7613 
7614 	drm_object_attach_property(&aconnector->base.base,
7615 				adev->mode_info.underscan_property,
7616 				UNDERSCAN_OFF);
7617 	drm_object_attach_property(&aconnector->base.base,
7618 				adev->mode_info.underscan_hborder_property,
7619 				0);
7620 	drm_object_attach_property(&aconnector->base.base,
7621 				adev->mode_info.underscan_vborder_property,
7622 				0);
7623 
7624 	if (!aconnector->mst_port)
7625 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7626 
7627 	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
7628 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7629 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7630 
7631 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7632 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7633 		drm_object_attach_property(&aconnector->base.base,
7634 				adev->mode_info.abm_level_property, 0);
7635 	}
7636 
7637 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7638 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7639 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7640 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7641 
7642 		if (!aconnector->mst_port)
7643 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7644 
7645 #ifdef CONFIG_DRM_AMD_DC_HDCP
7646 		if (adev->dm.hdcp_workqueue)
7647 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7648 #endif
7649 	}
7650 }
7651 
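/*
 * Translate a Linux i2c_msg transfer into a DC i2c_command and submit it on
 * the link's DDC channel. DC only reports overall success or failure, so on
 * success we return the number of messages, as the i2c core expects.
 */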
7652 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7653 			      struct i2c_msg *msgs, int num)
7654 {
7655 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7656 	struct ddc_service *ddc_service = i2c->ddc_service;
7657 	struct i2c_command cmd;
7658 	int i;
7659 	int result = -EIO;
7660 
7661 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7662 
7663 	if (!cmd.payloads)
7664 		return result;
7665 
7666 	cmd.number_of_payloads = num;
7667 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7668 	cmd.speed = 100;
7669 
7670 	for (i = 0; i < num; i++) {
7671 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7672 		cmd.payloads[i].address = msgs[i].addr;
7673 		cmd.payloads[i].length = msgs[i].len;
7674 		cmd.payloads[i].data = msgs[i].buf;
7675 	}
7676 
7677 	if (dc_submit_i2c(
7678 			ddc_service->ctx->dc,
7679 			ddc_service->ddc_pin->hw_info.ddc_channel,
7680 			&cmd))
7681 		result = num;
7682 
7683 	kfree(cmd.payloads);
7684 	return result;
7685 }
7686 
7687 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7688 {
7689 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7690 }
7691 
7692 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7693 	.master_xfer = amdgpu_dm_i2c_xfer,
7694 	.functionality = amdgpu_dm_i2c_func,
7695 };
7696 
7697 static struct amdgpu_i2c_adapter *
7698 create_i2c(struct ddc_service *ddc_service,
7699 	   int link_index,
7700 	   int *res)
7701 {
7702 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7703 	struct amdgpu_i2c_adapter *i2c;
7704 
7705 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7706 	if (!i2c)
7707 		return NULL;
7708 	i2c->base.owner = THIS_MODULE;
7709 	i2c->base.class = I2C_CLASS_DDC;
7710 	i2c->base.dev.parent = &adev->pdev->dev;
7711 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7712 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7713 	i2c_set_adapdata(&i2c->base, i2c);
7714 	i2c->ddc_service = ddc_service;
7715 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7716 
7717 	return i2c;
7718 }
7719 
7721 /*
7722  * Note: this function assumes that dc_link_detect() was called for the
7723  * dc_link which will be represented by this aconnector.
7724  */
7725 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7726 				    struct amdgpu_dm_connector *aconnector,
7727 				    uint32_t link_index,
7728 				    struct amdgpu_encoder *aencoder)
7729 {
7730 	int res = 0;
7731 	int connector_type;
7732 	struct dc *dc = dm->dc;
7733 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7734 	struct amdgpu_i2c_adapter *i2c;
7735 
7736 	link->priv = aconnector;
7737 
7738 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7739 
7740 	i2c = create_i2c(link->ddc, link->link_index, &res);
7741 	if (!i2c) {
7742 		DRM_ERROR("Failed to create i2c adapter data\n");
7743 		return -ENOMEM;
7744 	}
7745 
7746 	aconnector->i2c = i2c;
7747 	res = i2c_add_adapter(&i2c->base);
7748 
7749 	if (res) {
7750 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7751 		goto out_free;
7752 	}
7753 
7754 	connector_type = to_drm_connector_type(link->connector_signal);
7755 
7756 	res = drm_connector_init_with_ddc(
7757 			dm->ddev,
7758 			&aconnector->base,
7759 			&amdgpu_dm_connector_funcs,
7760 			connector_type,
7761 			&i2c->base);
7762 
7763 	if (res) {
7764 		DRM_ERROR("connector_init failed\n");
7765 		aconnector->connector_id = -1;
7766 		goto out_free;
7767 	}
7768 
7769 	drm_connector_helper_add(
7770 			&aconnector->base,
7771 			&amdgpu_dm_connector_helper_funcs);
7772 
7773 	amdgpu_dm_connector_init_helper(
7774 		dm,
7775 		aconnector,
7776 		connector_type,
7777 		link,
7778 		link_index);
7779 
7780 	drm_connector_attach_encoder(
7781 		&aconnector->base, &aencoder->base);
7782 
7783 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7784 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7785 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7786 
7787 out_free:
7788 	if (res) {
7789 		kfree(i2c);
7790 		aconnector->i2c = NULL;
7791 	}
7792 	return res;
7793 }
7794 
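/*
 * Every DM encoder may drive any CRTC, so the possible_crtcs mask is simply
 * (1 << num_crtc) - 1, capped at the 6 CRTCs the hardware supports.
 */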
7795 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7796 {
7797 	switch (adev->mode_info.num_crtc) {
7798 	case 1:
7799 		return 0x1;
7800 	case 2:
7801 		return 0x3;
7802 	case 3:
7803 		return 0x7;
7804 	case 4:
7805 		return 0xf;
7806 	case 5:
7807 		return 0x1f;
7808 	case 6:
7809 	default:
7810 		return 0x3f;
7811 	}
7812 }
7813 
7814 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7815 				  struct amdgpu_encoder *aencoder,
7816 				  uint32_t link_index)
7817 {
7818 	struct amdgpu_device *adev = drm_to_adev(dev);
7819 
7820 	int res = drm_encoder_init(dev,
7821 				   &aencoder->base,
7822 				   &amdgpu_dm_encoder_funcs,
7823 				   DRM_MODE_ENCODER_TMDS,
7824 				   NULL);
7825 
7826 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7827 
7828 	if (!res)
7829 		aencoder->encoder_id = link_index;
7830 	else
7831 		aencoder->encoder_id = -1;
7832 
7833 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7834 
7835 	return res;
7836 }
7837 
7838 static void manage_dm_interrupts(struct amdgpu_device *adev,
7839 				 struct amdgpu_crtc *acrtc,
7840 				 bool enable)
7841 {
7842 	/*
7843 	 * We have no guarantee that the frontend index maps to the same
7844 	 * backend index - some even map to more than one.
7845 	 *
7846 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7847 	 */
7848 	int irq_type =
7849 		amdgpu_display_crtc_idx_to_irq_type(
7850 			adev,
7851 			acrtc->crtc_id);
7852 
7853 	if (enable) {
7854 		drm_crtc_vblank_on(&acrtc->base);
7855 		amdgpu_irq_get(
7856 			adev,
7857 			&adev->pageflip_irq,
7858 			irq_type);
7859 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7860 		amdgpu_irq_get(
7861 			adev,
7862 			&adev->vline0_irq,
7863 			irq_type);
7864 #endif
7865 	} else {
7866 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7867 		amdgpu_irq_put(
7868 			adev,
7869 			&adev->vline0_irq,
7870 			irq_type);
7871 #endif
7872 		amdgpu_irq_put(
7873 			adev,
7874 			&adev->pageflip_irq,
7875 			irq_type);
7876 		drm_crtc_vblank_off(&acrtc->base);
7877 	}
7878 }
7879 
7880 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7881 				      struct amdgpu_crtc *acrtc)
7882 {
7883 	int irq_type =
7884 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7885 
7886 	/*
7887 	 * This reads the current state for the IRQ and forcibly reapplies
7888 	 * the setting to hardware.
7889 	 */
7890 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7891 }
7892 
7893 static bool
7894 is_scaling_state_different(const struct dm_connector_state *dm_state,
7895 			   const struct dm_connector_state *old_dm_state)
7896 {
7897 	if (dm_state->scaling != old_dm_state->scaling)
7898 		return true;
7899 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7900 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7901 			return true;
7902 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7903 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7904 			return true;
7905 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7906 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7907 		return true;
7908 	return false;
7909 }
7910 
7911 #ifdef CONFIG_DRM_AMD_DC_HDCP
7912 static bool is_content_protection_different(struct drm_connector_state *state,
7913 					    const struct drm_connector_state *old_state,
7914 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7915 {
7916 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7917 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7918 
7919 	/* Handle: Type0/1 change */
7920 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7921 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7922 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7923 		return true;
7924 	}
7925 
7926 	/* CP is being re-enabled, ignore this.
7927 	 *
7928 	 * Handles:	ENABLED -> DESIRED
7929 	 */
7930 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7931 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7932 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7933 		return false;
7934 	}
7935 
7936 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7937 	 *
7938 	 * Handles:	UNDESIRED -> ENABLED
7939 	 */
7940 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7941 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7942 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7943 
7944 	/* Check if something is connected or enabled; otherwise we would start
7945 	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
7946 	 *
7947 	 * Handles:	DESIRED -> DESIRED (Special case)
7948 	 */
7949 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7950 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7951 		dm_con_state->update_hdcp = false;
7952 		return true;
7953 	}
7954 
7955 	/*
7956 	 * Handles:	UNDESIRED -> UNDESIRED
7957 	 *		DESIRED -> DESIRED
7958 	 *		ENABLED -> ENABLED
7959 	 */
7960 	if (old_state->content_protection == state->content_protection)
7961 		return false;
7962 
7963 	/*
7964 	 * Handles:	UNDESIRED -> DESIRED
7965 	 *		DESIRED -> UNDESIRED
7966 	 *		ENABLED -> UNDESIRED
7967 	 */
7968 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7969 		return true;
7970 
7971 	/*
7972 	 * Handles:	DESIRED -> ENABLED
7973 	 */
7974 	return false;
7975 }
7976 
7977 #endif
7978 static void remove_stream(struct amdgpu_device *adev,
7979 			  struct amdgpu_crtc *acrtc,
7980 			  struct dc_stream_state *stream)
7981 {
7982 	/* this is the update mode case */
7983 
7984 	acrtc->otg_inst = -1;
7985 	acrtc->enabled = false;
7986 }
7987 
7988 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7989 			       struct dc_cursor_position *position)
7990 {
7991 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7992 	int x, y;
7993 	int xorigin = 0, yorigin = 0;
7994 
7995 	if (!crtc || !plane->state->fb)
7996 		return 0;
7997 
7998 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7999 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8000 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8001 			  __func__,
8002 			  plane->state->crtc_w,
8003 			  plane->state->crtc_h);
8004 		return -EINVAL;
8005 	}
8006 
8007 	x = plane->state->crtc_x;
8008 	y = plane->state->crtc_y;
8009 
8010 	if (x <= -amdgpu_crtc->max_cursor_width ||
8011 	    y <= -amdgpu_crtc->max_cursor_height)
8012 		return 0;
8013 
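	/*
	 * Clamp partially off-screen positions by shifting the hotspot
	 * instead: e.g. a cursor at x == -10 is programmed at x == 0 with
	 * x_hotspot == 10, and translate_by_source lets DC shift the
	 * cursor image accordingly.
	 */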
8014 	if (x < 0) {
8015 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8016 		x = 0;
8017 	}
8018 	if (y < 0) {
8019 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8020 		y = 0;
8021 	}
8022 	position->enable = true;
8023 	position->translate_by_source = true;
8024 	position->x = x;
8025 	position->y = y;
8026 	position->x_hotspot = xorigin;
8027 	position->y_hotspot = yorigin;
8028 
8029 	return 0;
8030 }
8031 
8032 static void handle_cursor_update(struct drm_plane *plane,
8033 				 struct drm_plane_state *old_plane_state)
8034 {
8035 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8036 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8037 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8038 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8039 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8040 	uint64_t address = afb ? afb->address : 0;
8041 	struct dc_cursor_position position = {0};
8042 	struct dc_cursor_attributes attributes;
8043 	int ret;
8044 
8045 	if (!plane->state->fb && !old_plane_state->fb)
8046 		return;
8047 
8048 	DC_LOG_CURSOR("%s: crtc_id=%d with size %dx%d\n",
8049 		      __func__,
8050 		      amdgpu_crtc->crtc_id,
8051 		      plane->state->crtc_w,
8052 		      plane->state->crtc_h);
8053 
8054 	ret = get_cursor_position(plane, crtc, &position);
8055 	if (ret)
8056 		return;
8057 
8058 	if (!position.enable) {
8059 		/* turn off cursor */
8060 		if (crtc_state && crtc_state->stream) {
8061 			mutex_lock(&adev->dm.dc_lock);
8062 			dc_stream_set_cursor_position(crtc_state->stream,
8063 						      &position);
8064 			mutex_unlock(&adev->dm.dc_lock);
8065 		}
8066 		return;
8067 	}
8068 
8069 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8070 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8071 
8072 	memset(&attributes, 0, sizeof(attributes));
8073 	attributes.address.high_part = upper_32_bits(address);
8074 	attributes.address.low_part  = lower_32_bits(address);
8075 	attributes.width             = plane->state->crtc_w;
8076 	attributes.height            = plane->state->crtc_h;
8077 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8078 	attributes.rotation_angle    = 0;
8079 	attributes.attribute_flags.value = 0;
8080 
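	/* DC expects the cursor pitch in pixels, hence the division by cpp. */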
8081 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8082 
8083 	if (crtc_state->stream) {
8084 		mutex_lock(&adev->dm.dc_lock);
8085 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8086 							 &attributes))
8087 			DRM_ERROR("DC failed to set cursor attributes\n");
8088 
8089 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8090 						   &position))
8091 			DRM_ERROR("DC failed to set cursor position\n");
8092 		mutex_unlock(&adev->dm.dc_lock);
8093 	}
8094 }
8095 
8096 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8097 {
8098 
8100 	WARN_ON(acrtc->event);
8101 
8102 	acrtc->event = acrtc->base.state->event;
8103 
8104 	/* Set the flip status */
8105 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8106 
8107 	/* Mark this event as consumed */
8108 	acrtc->base.state->event = NULL;
8109 
8110 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8111 		     acrtc->crtc_id);
8112 }
8113 
8114 static void update_freesync_state_on_stream(
8115 	struct amdgpu_display_manager *dm,
8116 	struct dm_crtc_state *new_crtc_state,
8117 	struct dc_stream_state *new_stream,
8118 	struct dc_plane_state *surface,
8119 	u32 flip_timestamp_in_us)
8120 {
8121 	struct mod_vrr_params vrr_params;
8122 	struct dc_info_packet vrr_infopacket = {0};
8123 	struct amdgpu_device *adev = dm->adev;
8124 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8125 	unsigned long flags;
8126 	bool pack_sdp_v1_3 = false;
8127 
8128 	if (!new_stream)
8129 		return;
8130 
8131 	/*
8132 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8133 	 * For now it's sufficient to just guard against these conditions.
8134 	 */
8135 
8136 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8137 		return;
8138 
8139 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8140 	vrr_params = acrtc->dm_irq_params.vrr_params;
8141 
8142 	if (surface) {
8143 		mod_freesync_handle_preflip(
8144 			dm->freesync_module,
8145 			surface,
8146 			new_stream,
8147 			flip_timestamp_in_us,
8148 			&vrr_params);
8149 
8150 		if (adev->family < AMDGPU_FAMILY_AI &&
8151 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8152 			mod_freesync_handle_v_update(dm->freesync_module,
8153 						     new_stream, &vrr_params);
8154 
8155 			/* Need to call this before the frame ends. */
8156 			dc_stream_adjust_vmin_vmax(dm->dc,
8157 						   new_crtc_state->stream,
8158 						   &vrr_params.adjust);
8159 		}
8160 	}
8161 
8162 	mod_freesync_build_vrr_infopacket(
8163 		dm->freesync_module,
8164 		new_stream,
8165 		&vrr_params,
8166 		PACKET_TYPE_VRR,
8167 		TRANSFER_FUNC_UNKNOWN,
8168 		&vrr_infopacket,
8169 		pack_sdp_v1_3);
8170 
8171 	new_crtc_state->freesync_timing_changed |=
8172 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8173 			&vrr_params.adjust,
8174 			sizeof(vrr_params.adjust)) != 0);
8175 
8176 	new_crtc_state->freesync_vrr_info_changed |=
8177 		(memcmp(&new_crtc_state->vrr_infopacket,
8178 			&vrr_infopacket,
8179 			sizeof(vrr_infopacket)) != 0);
8180 
8181 	acrtc->dm_irq_params.vrr_params = vrr_params;
8182 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8183 
8184 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8185 	new_stream->vrr_infopacket = vrr_infopacket;
8186 
8187 	if (new_crtc_state->freesync_vrr_info_changed)
8188 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8189 			      new_crtc_state->base.crtc->base.id,
8190 			      (int)new_crtc_state->base.vrr_enabled,
8191 			      (int)vrr_params.state);
8192 
8193 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8194 }
8195 
8196 static void update_stream_irq_parameters(
8197 	struct amdgpu_display_manager *dm,
8198 	struct dm_crtc_state *new_crtc_state)
8199 {
8200 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8201 	struct mod_vrr_params vrr_params;
8202 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8203 	struct amdgpu_device *adev = dm->adev;
8204 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8205 	unsigned long flags;
8206 
8207 	if (!new_stream)
8208 		return;
8209 
8210 	/*
8211 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8212 	 * For now it's sufficient to just guard against these conditions.
8213 	 */
8214 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8215 		return;
8216 
8217 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8218 	vrr_params = acrtc->dm_irq_params.vrr_params;
8219 
8220 	if (new_crtc_state->vrr_supported &&
8221 	    config.min_refresh_in_uhz &&
8222 	    config.max_refresh_in_uhz) {
8223 		/*
8224 		 * if freesync compatible mode was set, config.state will be set
8225 		 * in atomic check
8226 		 */
8227 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8228 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8229 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8230 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8231 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8232 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8233 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8234 		} else {
8235 			config.state = new_crtc_state->base.vrr_enabled ?
8236 						     VRR_STATE_ACTIVE_VARIABLE :
8237 						     VRR_STATE_INACTIVE;
8238 		}
8239 	} else {
8240 		config.state = VRR_STATE_UNSUPPORTED;
8241 	}
8242 
8243 	mod_freesync_build_vrr_params(dm->freesync_module,
8244 				      new_stream,
8245 				      &config, &vrr_params);
8246 
8247 	new_crtc_state->freesync_timing_changed |=
8248 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8249 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8250 
8251 	new_crtc_state->freesync_config = config;
8252 	/* Copy state for access from DM IRQ handler */
8253 	acrtc->dm_irq_params.freesync_config = config;
8254 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8255 	acrtc->dm_irq_params.vrr_params = vrr_params;
8256 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8257 }
8258 
8259 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8260 					    struct dm_crtc_state *new_state)
8261 {
8262 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8263 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8264 
8265 	if (!old_vrr_active && new_vrr_active) {
8266 		/* Transition VRR inactive -> active:
8267 		 * While VRR is active, we must not disable vblank irq, as a
8268 		 * reenable after disable would compute bogus vblank/pflip
8269 		 * timestamps if it likely happened inside display front-porch.
8270 		 *
8271 		 * We also need vupdate irq for the actual core vblank handling
8272 		 * at end of vblank.
8273 		 */
8274 		dm_set_vupdate_irq(new_state->base.crtc, true);
8275 		drm_crtc_vblank_get(new_state->base.crtc);
8276 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8277 				 __func__, new_state->base.crtc->base.id);
8278 	} else if (old_vrr_active && !new_vrr_active) {
8279 		/* Transition VRR active -> inactive:
8280 		 * Allow vblank irq disable again for fixed refresh rate.
8281 		 */
8282 		dm_set_vupdate_irq(new_state->base.crtc, false);
8283 		drm_crtc_vblank_put(new_state->base.crtc);
8284 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8285 				 __func__, new_state->base.crtc->base.id);
8286 	}
8287 }
8288 
8289 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8290 {
8291 	struct drm_plane *plane;
8292 	struct drm_plane_state *old_plane_state;
8293 	int i;
8294 
8295 	/*
8296 	 * TODO: Make this per-stream so we don't issue redundant updates for
8297 	 * commits with multiple streams.
8298 	 */
8299 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8300 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8301 			handle_cursor_update(plane, old_plane_state);
8302 }
8303 
8304 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8305 				    struct dc_state *dc_state,
8306 				    struct drm_device *dev,
8307 				    struct amdgpu_display_manager *dm,
8308 				    struct drm_crtc *pcrtc,
8309 				    bool wait_for_vblank)
8310 {
8311 	uint32_t i;
8312 	uint64_t timestamp_ns;
8313 	struct drm_plane *plane;
8314 	struct drm_plane_state *old_plane_state, *new_plane_state;
8315 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8316 	struct drm_crtc_state *new_pcrtc_state =
8317 			drm_atomic_get_new_crtc_state(state, pcrtc);
8318 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8319 	struct dm_crtc_state *dm_old_crtc_state =
8320 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8321 	int planes_count = 0, vpos, hpos;
8322 	long r;
8323 	unsigned long flags;
8324 	struct amdgpu_bo *abo;
8325 	uint32_t target_vblank, last_flip_vblank;
8326 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8327 	bool pflip_present = false;
8328 	struct {
8329 		struct dc_surface_update surface_updates[MAX_SURFACES];
8330 		struct dc_plane_info plane_infos[MAX_SURFACES];
8331 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8332 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8333 		struct dc_stream_update stream_update;
8334 	} *bundle;
8335 
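	/*
	 * The update bundle is several KiB (MAX_SURFACES copies of the
	 * per-plane update structures), so allocate it from the heap rather
	 * than placing it on the stack.
	 */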
8336 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8337 
8338 	if (!bundle) {
8339 		dm_error("Failed to allocate update bundle\n");
8340 		goto cleanup;
8341 	}
8342 
8343 	/*
8344 	 * Disable the cursor first if we're disabling all the planes.
8345 	 * It'll remain on the screen after the planes are re-enabled
8346 	 * if we don't.
8347 	 */
8348 	if (acrtc_state->active_planes == 0)
8349 		amdgpu_dm_commit_cursors(state);
8350 
8351 	/* update planes when needed */
8352 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8353 		struct drm_crtc *crtc = new_plane_state->crtc;
8354 		struct drm_crtc_state *new_crtc_state;
8355 		struct drm_framebuffer *fb = new_plane_state->fb;
8356 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8357 		bool plane_needs_flip;
8358 		struct dc_plane_state *dc_plane;
8359 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8360 
8361 		/* Cursor plane is handled after stream updates */
8362 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8363 			continue;
8364 
8365 		if (!fb || !crtc || pcrtc != crtc)
8366 			continue;
8367 
8368 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8369 		if (!new_crtc_state->active)
8370 			continue;
8371 
8372 		dc_plane = dm_new_plane_state->dc_state;
8373 
8374 		bundle->surface_updates[planes_count].surface = dc_plane;
8375 		if (new_pcrtc_state->color_mgmt_changed) {
8376 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8377 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8378 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8379 		}
8380 
8381 		fill_dc_scaling_info(new_plane_state,
8382 				     &bundle->scaling_infos[planes_count]);
8383 
8384 		bundle->surface_updates[planes_count].scaling_info =
8385 			&bundle->scaling_infos[planes_count];
8386 
8387 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8388 
8389 		pflip_present = pflip_present || plane_needs_flip;
8390 
8391 		if (!plane_needs_flip) {
8392 			planes_count += 1;
8393 			continue;
8394 		}
8395 
8396 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8397 
8398 		/*
8399 		 * Wait for all fences on this FB. Do a limited wait to avoid
8400 		 * deadlock during GPU reset when this fence will not signal
8401 		 * but we hold the reservation lock for the BO.
8402 		 */
8403 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8404 							false,
8405 							msecs_to_jiffies(5000));
8406 		if (unlikely(r <= 0))
8407 			DRM_ERROR("Waiting for fences timed out!\n");
8408 
8409 		fill_dc_plane_info_and_addr(
8410 			dm->adev, new_plane_state,
8411 			afb->tiling_flags,
8412 			&bundle->plane_infos[planes_count],
8413 			&bundle->flip_addrs[planes_count].address,
8414 			afb->tmz_surface, false);
8415 
8416 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8417 				 new_plane_state->plane->index,
8418 				 bundle->plane_infos[planes_count].dcc.enable);
8419 
8420 		bundle->surface_updates[planes_count].plane_info =
8421 			&bundle->plane_infos[planes_count];
8422 
8423 		/*
8424 		 * Only allow immediate flips for fast updates that don't
8425 		 * change FB pitch, DCC state, rotation or mirroring.
8426 		 */
8427 		bundle->flip_addrs[planes_count].flip_immediate =
8428 			crtc->state->async_flip &&
8429 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8430 
8431 		timestamp_ns = ktime_get_ns();
8432 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8433 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8434 		bundle->surface_updates[planes_count].surface = dc_plane;
8435 
8436 		if (!bundle->surface_updates[planes_count].surface) {
8437 			DRM_ERROR("No surface for CRTC: id=%d\n",
8438 					acrtc_attach->crtc_id);
8439 			continue;
8440 		}
8441 
8442 		if (plane == pcrtc->primary)
8443 			update_freesync_state_on_stream(
8444 				dm,
8445 				acrtc_state,
8446 				acrtc_state->stream,
8447 				dc_plane,
8448 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8449 
8450 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8451 				 __func__,
8452 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8453 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8454 
8455 		planes_count += 1;
8456 
8457 	}
8458 
8459 	if (pflip_present) {
8460 		if (!vrr_active) {
8461 			/* Use old throttling in non-vrr fixed refresh rate mode
8462 			 * to keep flip scheduling based on target vblank counts
8463 			 * working in a backwards compatible way, e.g., for
8464 			 * clients using the GLX_OML_sync_control extension or
8465 			 * DRI3/Present extension with defined target_msc.
8466 			 */
8467 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8468 		}
8469 		else {
8470 			/* For variable refresh rate mode only:
8471 			 * Get vblank of last completed flip to avoid > 1 vrr
8472 			 * flips per video frame by use of throttling, but allow
8473 			 * flip programming anywhere in the possibly large
8474 			 * variable vrr vblank interval for fine-grained flip
8475 			 * timing control and more opportunity to avoid stutter
8476 			 * on late submission of flips.
8477 			 */
8478 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8479 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8480 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8481 		}
8482 
8483 		target_vblank = last_flip_vblank + wait_for_vblank;
8484 
8485 		/*
8486 		 * Wait until we're out of the vertical blank period before the one
8487 		 * targeted by the flip
8488 		 */
8489 		while ((acrtc_attach->enabled &&
8490 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8491 							    0, &vpos, &hpos, NULL,
8492 							    NULL, &pcrtc->hwmode)
8493 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8494 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8495 			(int)(target_vblank -
8496 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8497 			usleep_range(1000, 1100);
8498 		}
8499 
8500 		/*
8501 		 * Prepare the flip event for the pageflip interrupt to handle.
8502 		 *
8503 		 * This only works in the case where we've already turned on the
8504 		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8505 		 * from 0 -> n planes we have to skip a hardware generated event
8506 		 * and rely on sending it from software.
8507 		 */
8508 		if (acrtc_attach->base.state->event &&
8509 		    acrtc_state->active_planes > 0) {
8510 			drm_crtc_vblank_get(pcrtc);
8511 
8512 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8513 
8514 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8515 			prepare_flip_isr(acrtc_attach);
8516 
8517 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8518 		}
8519 
8520 		if (acrtc_state->stream) {
8521 			if (acrtc_state->freesync_vrr_info_changed)
8522 				bundle->stream_update.vrr_infopacket =
8523 					&acrtc_state->stream->vrr_infopacket;
8524 		}
8525 	}
8526 
8527 	/* Update the planes if changed or disable if we don't have any. */
8528 	if ((planes_count || acrtc_state->active_planes == 0) &&
8529 		acrtc_state->stream) {
8530 		bundle->stream_update.stream = acrtc_state->stream;
8531 		if (new_pcrtc_state->mode_changed) {
8532 			bundle->stream_update.src = acrtc_state->stream->src;
8533 			bundle->stream_update.dst = acrtc_state->stream->dst;
8534 		}
8535 
8536 		if (new_pcrtc_state->color_mgmt_changed) {
8537 			/*
8538 			 * TODO: This isn't fully correct since we've actually
8539 			 * already modified the stream in place.
8540 			 */
8541 			bundle->stream_update.gamut_remap =
8542 				&acrtc_state->stream->gamut_remap_matrix;
8543 			bundle->stream_update.output_csc_transform =
8544 				&acrtc_state->stream->csc_color_matrix;
8545 			bundle->stream_update.out_transfer_func =
8546 				acrtc_state->stream->out_transfer_func;
8547 		}
8548 
8549 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8550 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8551 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8552 
8553 		/*
8554 		 * If FreeSync state on the stream has changed then we need to
8555 		 * re-adjust the min/max bounds now that DC doesn't handle this
8556 		 * as part of commit.
8557 		 */
8558 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8559 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8560 			dc_stream_adjust_vmin_vmax(
8561 				dm->dc, acrtc_state->stream,
8562 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8563 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8564 		}
8565 		mutex_lock(&dm->dc_lock);
8566 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8567 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8568 			amdgpu_dm_psr_disable(acrtc_state->stream);
8569 
8570 		dc_commit_updates_for_stream(dm->dc,
8571 					     bundle->surface_updates,
8572 					     planes_count,
8573 					     acrtc_state->stream,
8574 					     &bundle->stream_update,
8575 					     dc_state);
8576 
8577 		/*
8578 		 * Enable or disable the interrupts on the backend.
8579 		 *
8580 		 * Most pipes are put into power gating when unused.
8581 		 *
8582 		 * When power gating is enabled on a pipe we lose the
8583 		 * interrupt enablement state when power gating is disabled.
8584 		 *
8585 		 * So we need to update the IRQ control state in hardware
8586 		 * whenever the pipe turns on (since it could be previously
8587 		 * power gated) or off (since some pipes can't be power gated
8588 		 * on some ASICs).
8589 		 */
8590 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8591 			dm_update_pflip_irq_state(drm_to_adev(dev),
8592 						  acrtc_attach);
8593 
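		/* PSR link setup happens on a full (non-fast) update when the
		 * link supports PSR but the feature is not yet enabled; once
		 * set up, a fast update may then activate PSR if it is not
		 * already active.
		 */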
8594 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8595 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8596 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8597 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8598 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8599 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8600 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8601 			amdgpu_dm_psr_enable(acrtc_state->stream);
8602 		}
8603 
8604 		mutex_unlock(&dm->dc_lock);
8605 	}
8606 
8607 	/*
8608 	 * Update cursor state *after* programming all the planes.
8609 	 * This avoids redundant programming when we're disabling a single
8610 	 * plane, since those pipes are being disabled anyway.
8611 	 */
8612 	if (acrtc_state->active_planes)
8613 		amdgpu_dm_commit_cursors(state);
8614 
8615 cleanup:
8616 	kfree(bundle);
8617 }
8618 
8619 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8620 				   struct drm_atomic_state *state)
8621 {
8622 	struct amdgpu_device *adev = drm_to_adev(dev);
8623 	struct amdgpu_dm_connector *aconnector;
8624 	struct drm_connector *connector;
8625 	struct drm_connector_state *old_con_state, *new_con_state;
8626 	struct drm_crtc_state *new_crtc_state;
8627 	struct dm_crtc_state *new_dm_crtc_state;
8628 	const struct dc_stream_status *status;
8629 	int i, inst;
8630 
8631 	/* Notify audio device removals. */
8632 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8633 		if (old_con_state->crtc != new_con_state->crtc) {
8634 			/* CRTC changes require notification. */
8635 			goto notify;
8636 		}
8637 
8638 		if (!new_con_state->crtc)
8639 			continue;
8640 
8641 		new_crtc_state = drm_atomic_get_new_crtc_state(
8642 			state, new_con_state->crtc);
8643 
8644 		if (!new_crtc_state)
8645 			continue;
8646 
8647 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8648 			continue;
8649 
8650 	notify:
8651 		aconnector = to_amdgpu_dm_connector(connector);
8652 
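		/* Cache and invalidate the audio instance under the lock, then
		 * notify with the old instance so the audio client tears down
		 * the correct pin.
		 */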
8653 		mutex_lock(&adev->dm.audio_lock);
8654 		inst = aconnector->audio_inst;
8655 		aconnector->audio_inst = -1;
8656 		mutex_unlock(&adev->dm.audio_lock);
8657 
8658 		amdgpu_dm_audio_eld_notify(adev, inst);
8659 	}
8660 
8661 	/* Notify audio device additions. */
8662 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8663 		if (!new_con_state->crtc)
8664 			continue;
8665 
8666 		new_crtc_state = drm_atomic_get_new_crtc_state(
8667 			state, new_con_state->crtc);
8668 
8669 		if (!new_crtc_state)
8670 			continue;
8671 
8672 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8673 			continue;
8674 
8675 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8676 		if (!new_dm_crtc_state->stream)
8677 			continue;
8678 
8679 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8680 		if (!status)
8681 			continue;
8682 
8683 		aconnector = to_amdgpu_dm_connector(connector);
8684 
8685 		mutex_lock(&adev->dm.audio_lock);
8686 		inst = status->audio_inst;
8687 		aconnector->audio_inst = inst;
8688 		mutex_unlock(&adev->dm.audio_lock);
8689 
8690 		amdgpu_dm_audio_eld_notify(adev, inst);
8691 	}
8692 }
8693 
8694 /**
8695  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8696  * @crtc_state: the DRM CRTC state
8697  * @stream_state: the DC stream state
8698  *
8699  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8700  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8701  */
8702 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8703 						struct dc_stream_state *stream_state)
8704 {
8705 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8706 }
8707 
8708 /**
8709  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8710  * @state: The atomic state to commit
8711  *
8712  * This will tell DC to commit the constructed DC state from atomic_check,
8713  * programming the hardware. Any failure here implies a hardware failure, since
8714  * atomic check should have filtered anything non-kosher.
8715  */
8716 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8717 {
8718 	struct drm_device *dev = state->dev;
8719 	struct amdgpu_device *adev = drm_to_adev(dev);
8720 	struct amdgpu_display_manager *dm = &adev->dm;
8721 	struct dm_atomic_state *dm_state;
8722 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8723 	uint32_t i, j;
8724 	struct drm_crtc *crtc;
8725 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8726 	unsigned long flags;
8727 	bool wait_for_vblank = true;
8728 	struct drm_connector *connector;
8729 	struct drm_connector_state *old_con_state, *new_con_state;
8730 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8731 	int crtc_disable_count = 0;
8732 	bool mode_set_reset_required = false;
8733 
8734 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8735 
8736 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8737 
8738 	dm_state = dm_atomic_get_new_state(state);
8739 	if (dm_state && dm_state->context) {
8740 		dc_state = dm_state->context;
8741 	} else {
8742 		/* No state changes, retain current state. */
8743 		dc_state_temp = dc_create_state(dm->dc);
8744 		ASSERT(dc_state_temp);
8745 		dc_state = dc_state_temp;
8746 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8747 	}
8748 
8749 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8750 				       new_crtc_state, i) {
8751 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8752 
8753 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8754 
8755 		if (old_crtc_state->active &&
8756 		    (!new_crtc_state->active ||
8757 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8758 			manage_dm_interrupts(adev, acrtc, false);
8759 			dc_stream_release(dm_old_crtc_state->stream);
8760 		}
8761 	}
8762 
8763 	drm_atomic_helper_calc_timestamping_constants(state);
8764 
8765 	/* update changed items */
8766 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8767 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8768 
8769 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8770 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8771 
8772 		DRM_DEBUG_ATOMIC(
8773 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8774 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8775 			"connectors_changed:%d\n",
8776 			acrtc->crtc_id,
8777 			new_crtc_state->enable,
8778 			new_crtc_state->active,
8779 			new_crtc_state->planes_changed,
8780 			new_crtc_state->mode_changed,
8781 			new_crtc_state->active_changed,
8782 			new_crtc_state->connectors_changed);
8783 
8784 		/* Disable cursor if disabling crtc */
8785 		if (old_crtc_state->active && !new_crtc_state->active) {
8786 			struct dc_cursor_position position;
8787 
8788 			memset(&position, 0, sizeof(position));
8789 			mutex_lock(&dm->dc_lock);
8790 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8791 			mutex_unlock(&dm->dc_lock);
8792 		}
8793 
8794 		/* Copy all transient state flags into dc state */
8795 		if (dm_new_crtc_state->stream) {
8796 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8797 							    dm_new_crtc_state->stream);
8798 		}
8799 
8800 		/* Handle the headless hotplug case, updating new_state and
8801 		 * aconnector as needed.
8802 		 */
8803 
8804 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8806 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8807 
8808 			if (!dm_new_crtc_state->stream) {
8809 				/*
8810 				 * This could happen because of issues with
8811 				 * userspace notification delivery. In this case
8812 				 * userspace tries to set a mode on a display
8813 				 * that is in fact disconnected; dc_sink is NULL
8814 				 * on the aconnector, and we expect a mode reset
8815 				 * to come soon.
8816 				 *
8817 				 * This can also happen when an unplug occurs
8818 				 * while the resume sequence is still running.
8819 				 *
8820 				 * In either case, we want to pretend we still
8821 				 * have a sink to keep the pipe running so that
8822 				 * hw state stays consistent with the sw state.
8823 				 */
8824 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8825 						__func__, acrtc->base.base.id);
8826 				continue;
8827 			}
8828 
8829 			if (dm_old_crtc_state->stream)
8830 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8831 
8832 			pm_runtime_get_noresume(dev->dev);
8833 
8834 			acrtc->enabled = true;
8835 			acrtc->hw_mode = new_crtc_state->mode;
8836 			crtc->hwmode = new_crtc_state->mode;
8837 			mode_set_reset_required = true;
8838 		} else if (modereset_required(new_crtc_state)) {
8839 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8840 			/* i.e. reset mode */
8841 			if (dm_old_crtc_state->stream)
8842 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8843 
8844 			mode_set_reset_required = true;
8845 		}
8846 	} /* for_each_crtc_in_state() */
8847 
8848 	if (dc_state) {
8849 		/* if there was a mode set or reset, disable eDP PSR */
8850 		if (mode_set_reset_required)
8851 			amdgpu_dm_psr_disable_all(dm);
8852 
8853 		dm_enable_per_frame_crtc_master_sync(dc_state);
8854 		mutex_lock(&dm->dc_lock);
8855 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8856 #if defined(CONFIG_DRM_AMD_DC_DCN)
8857 		/* Allow idle optimization when vblank count is 0 for display off */
8858 		if (dm->active_vblank_irq_count == 0)
8859 			dc_allow_idle_optimizations(dm->dc, true);
8860 #endif
8861 		mutex_unlock(&dm->dc_lock);
8862 	}
8863 
8864 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8865 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8866 
8867 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8868 
8869 		if (dm_new_crtc_state->stream != NULL) {
8870 			const struct dc_stream_status *status =
8871 					dc_stream_get_status(dm_new_crtc_state->stream);
8872 
8873 			if (!status)
8874 				status = dc_stream_get_status_from_state(dc_state,
8875 									 dm_new_crtc_state->stream);
8876 			if (!status)
8877 				DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
8878 			else
8879 				acrtc->otg_inst = status->primary_otg_inst;
8880 		}
8881 	}
8882 #ifdef CONFIG_DRM_AMD_DC_HDCP
8883 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8884 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8885 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8886 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8887 
8888 		new_crtc_state = NULL;
8889 
8890 		if (acrtc)
8891 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8892 
8893 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8894 
8895 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8896 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8897 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8898 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8899 			dm_new_con_state->update_hdcp = true;
8900 			continue;
8901 		}
8902 
8903 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8904 			hdcp_update_display(
8905 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8906 				new_con_state->hdcp_content_type,
8907 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8908 	}
8909 #endif
8910 
8911 	/* Handle connector state changes */
8912 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8913 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8914 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8915 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8916 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8917 		struct dc_stream_update stream_update;
8918 		struct dc_info_packet hdr_packet;
8919 		struct dc_stream_status *status = NULL;
8920 		bool abm_changed, hdr_changed, scaling_changed;
8921 
8922 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8923 		memset(&stream_update, 0, sizeof(stream_update));
8924 
8925 		if (acrtc) {
8926 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8927 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8928 		}
8929 
8930 		/* Skip any modesets/resets */
8931 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8932 			continue;
8933 
8934 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8935 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8936 
8937 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8938 							     dm_old_con_state);
8939 
8940 		abm_changed = dm_new_crtc_state->abm_level !=
8941 			      dm_old_crtc_state->abm_level;
8942 
8943 		hdr_changed =
8944 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8945 
8946 		if (!scaling_changed && !abm_changed && !hdr_changed)
8947 			continue;
8948 
8949 		stream_update.stream = dm_new_crtc_state->stream;
8950 		if (scaling_changed) {
8951 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8952 					dm_new_con_state, dm_new_crtc_state->stream);
8953 
8954 			stream_update.src = dm_new_crtc_state->stream->src;
8955 			stream_update.dst = dm_new_crtc_state->stream->dst;
8956 		}
8957 
8958 		if (abm_changed) {
8959 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8960 
8961 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8962 		}
8963 
8964 		if (hdr_changed) {
8965 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8966 			stream_update.hdr_static_metadata = &hdr_packet;
8967 		}
8968 
8969 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8970 		WARN_ON(!status);
8971 		WARN_ON(!status->plane_count);
8972 
8973 		/*
8974 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8975 		 * Here we create an empty update on each plane.
8976 		 * To fix this, DC should permit updating only stream properties.
8977 		 */
8978 		for (j = 0; j < status->plane_count; j++)
8979 			dummy_updates[j].surface = status->plane_states[0];
8980 
8981 
8982 		mutex_lock(&dm->dc_lock);
8983 		dc_commit_updates_for_stream(dm->dc,
8984 					     dummy_updates,
8985 					     status->plane_count,
8986 					     dm_new_crtc_state->stream,
8987 					     &stream_update,
8988 					     dc_state);
8989 		mutex_unlock(&dm->dc_lock);
8990 	}
8991 
8992 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8993 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8994 				      new_crtc_state, i) {
8995 		if (old_crtc_state->active && !new_crtc_state->active)
8996 			crtc_disable_count++;
8997 
8998 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8999 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9000 
9001 		/* For freesync config update on crtc state and params for irq */
9002 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9003 
9004 		/* Handle vrr on->off / off->on transitions */
9005 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9006 						dm_new_crtc_state);
9007 	}
9008 
9009 	/*
9010 	 * Enable interrupts for CRTCs that are newly enabled or went through
9011 	 * a modeset. It was intentionally deferred until after the front end
9012 	 * state was modified to wait until the OTG was on and so the IRQ
9013 	 * handlers didn't access stale or invalid state.
9014 	 */
9015 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9016 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9017 #ifdef CONFIG_DEBUG_FS
9018 		bool configure_crc = false;
9019 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9020 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9021 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9022 #endif
9023 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9024 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9025 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9026 #endif
9027 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9028 
9029 		if (new_crtc_state->active &&
9030 		    (!old_crtc_state->active ||
9031 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9032 			dc_stream_retain(dm_new_crtc_state->stream);
9033 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9034 			manage_dm_interrupts(adev, acrtc, true);
9035 
9036 #ifdef CONFIG_DEBUG_FS
9037 			/*
9038 			 * Frontend may have changed so reapply the CRC capture
9039 			 * settings for the stream.
9040 			 */
9041 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9042 
9043 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9044 				configure_crc = true;
9045 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9046 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9047 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9048 					acrtc->dm_irq_params.crc_window.update_win = true;
9049 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9050 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9051 					crc_rd_wrk->crtc = crtc;
9052 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9053 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9054 				}
9055 #endif
9056 			}
9057 
9058 			if (configure_crc)
9059 				if (amdgpu_dm_crtc_configure_crc_source(
9060 					crtc, dm_new_crtc_state, cur_crc_src))
9061 					DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9062 #endif
9063 		}
9064 	}
9065 
9066 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9067 		if (new_crtc_state->async_flip)
9068 			wait_for_vblank = false;
9069 
9070 	/* update planes when needed per crtc */
9071 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9072 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9073 
9074 		if (dm_new_crtc_state->stream)
9075 			amdgpu_dm_commit_planes(state, dc_state, dev,
9076 						dm, crtc, wait_for_vblank);
9077 	}
9078 
9079 	/* Update audio instances for each connector. */
9080 	amdgpu_dm_commit_audio(dev, state);
9081 
9082 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9083 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9084 	/* restore the backlight level */
9085 	if (dm->backlight_dev)
9086 		amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9087 #endif
9088 	/*
9089 	 * Send a vblank event for every event not handled in a flip and
9090 	 * mark the event as consumed for drm_atomic_helper_commit_hw_done().
9091 	 */
9092 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9093 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9094 
9095 		if (new_crtc_state->event)
9096 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9097 
9098 		new_crtc_state->event = NULL;
9099 	}
9100 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9101 
9102 	/* Signal HW programming completion */
9103 	drm_atomic_helper_commit_hw_done(state);
9104 
9105 	if (wait_for_vblank)
9106 		drm_atomic_helper_wait_for_flip_done(dev, state);
9107 
9108 	drm_atomic_helper_cleanup_planes(dev, state);
9109 
9110 	/* return the stolen vga memory back to VRAM */
9111 	if (!adev->mman.keep_stolen_vga_memory)
9112 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9113 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9114 
9115 	/*
9116 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9117 	 * so we can put the GPU into runtime suspend if we're not driving any
9118 	 * displays anymore
9119 	 */
9120 	for (i = 0; i < crtc_disable_count; i++)
9121 		pm_runtime_put_autosuspend(dev->dev);
9122 	pm_runtime_mark_last_busy(dev->dev);
9123 
9124 	if (dc_state_temp)
9125 		dc_release_state(dc_state_temp);
9126 }
9127 
9128 
9129 static int dm_force_atomic_commit(struct drm_connector *connector)
9130 {
9131 	int ret = 0;
9132 	struct drm_device *ddev = connector->dev;
9133 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9134 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9135 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9136 	struct drm_connector_state *conn_state;
9137 	struct drm_crtc_state *crtc_state;
9138 	struct drm_plane_state *plane_state;
9139 
9140 	if (!state)
9141 		return -ENOMEM;
9142 
9143 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9144 
9145 	/* Construct an atomic state to restore the previous display settings */
9146 
9147 	/*
9148 	 * Attach connectors to drm_atomic_state
9149 	 */
9150 	conn_state = drm_atomic_get_connector_state(state, connector);
9151 
9152 	ret = PTR_ERR_OR_ZERO(conn_state);
9153 	if (ret)
9154 		goto out;
9155 
9156 	/* Attach crtc to drm_atomic_state */
9157 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9158 
9159 	ret = PTR_ERR_OR_ZERO(crtc_state);
9160 	if (ret)
9161 		goto out;
9162 
9163 	/* force a restore */
9164 	crtc_state->mode_changed = true;
9165 
9166 	/* Attach plane to drm_atomic_state */
9167 	plane_state = drm_atomic_get_plane_state(state, plane);
9168 
9169 	ret = PTR_ERR_OR_ZERO(plane_state);
9170 	if (ret)
9171 		goto out;
9172 
9173 	/* Call commit internally with the state we just constructed */
9174 	ret = drm_atomic_commit(state);
9175 
9176 out:
9177 	drm_atomic_state_put(state);
9178 	if (ret)
9179 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9180 
9181 	return ret;
9182 }
9183 
9184 /*
9185  * This function handles all cases when a mode set does not come upon hotplug.
9186  * This includes when a display is unplugged and then plugged back into the
9187  * same port and when running without usermode desktop manager support.
9188  */
9189 void dm_restore_drm_connector_state(struct drm_device *dev,
9190 				    struct drm_connector *connector)
9191 {
9192 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9193 	struct amdgpu_crtc *disconnected_acrtc;
9194 	struct dm_crtc_state *acrtc_state;
9195 
9196 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9197 		return;
9198 
9199 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9200 	if (!disconnected_acrtc)
9201 		return;
9202 
9203 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9204 	if (!acrtc_state->stream)
9205 		return;
9206 
9207 	/*
9208 	 * If the previous sink is not released and is different from the
9209 	 * current one, we deduce we are in a state where we cannot rely on a
9210 	 * usermode call to turn on the display, so we do it here.
9211 	 */
9212 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9213 		dm_force_atomic_commit(&aconnector->base);
9214 }
9215 
9216 /*
9217  * Grabs all modesetting locks to serialize against any blocking commits and
9218  * waits for completion of all non-blocking commits.
9219  */
9220 static int do_aquire_global_lock(struct drm_device *dev,
9221 				 struct drm_atomic_state *state)
9222 {
9223 	struct drm_crtc *crtc;
9224 	struct drm_crtc_commit *commit;
9225 	long ret;
9226 
9227 	/*
9228 	 * Adding all modeset locks to acquire_ctx will
9229 	 * ensure that when the framework releases it, the
9230 	 * extra locks we are taking here will get released too.
9231 	 */
9232 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9233 	if (ret)
9234 		return ret;
9235 
9236 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9237 		spin_lock(&crtc->commit_lock);
9238 		commit = list_first_entry_or_null(&crtc->commit_list,
9239 				struct drm_crtc_commit, commit_entry);
9240 		if (commit)
9241 			drm_crtc_commit_get(commit);
9242 		spin_unlock(&crtc->commit_lock);
9243 
9244 		if (!commit)
9245 			continue;
9246 
9247 		/*
9248 		 * Make sure all pending HW programming has completed and
9249 		 * all page flips are done.
9250 		 */
9251 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9252 
9253 		if (ret > 0)
9254 			ret = wait_for_completion_interruptible_timeout(
9255 					&commit->flip_done, 10*HZ);
9256 
9257 		if (ret == 0)
9258 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9259 				  "timed out\n", crtc->base.id, crtc->name);
9260 
9261 		drm_crtc_commit_put(commit);
9262 	}
9263 
9264 	return ret < 0 ? ret : 0;
9265 }
9266 
9267 static void get_freesync_config_for_crtc(
9268 	struct dm_crtc_state *new_crtc_state,
9269 	struct dm_connector_state *new_con_state)
9270 {
9271 	struct mod_freesync_config config = {0};
9272 	struct amdgpu_dm_connector *aconnector =
9273 			to_amdgpu_dm_connector(new_con_state->base.connector);
9274 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9275 	int vrefresh = drm_mode_vrefresh(mode);
9276 	bool fs_vid_mode = false;
9277 
9278 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9279 					vrefresh >= aconnector->min_vfreq &&
9280 					vrefresh <= aconnector->max_vfreq;
9281 
9282 	if (new_crtc_state->vrr_supported) {
9283 		new_crtc_state->stream->ignore_msa_timing_param = true;
9284 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9285 
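		/* mod_freesync expects the refresh bounds in micro-Hz; the
		 * connector caps are stored in Hz.
		 */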
9286 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9287 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9288 		config.vsif_supported = true;
9289 		config.btr = true;
9290 
9291 		if (fs_vid_mode) {
9292 			config.state = VRR_STATE_ACTIVE_FIXED;
9293 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9294 			goto out;
9295 		} else if (new_crtc_state->base.vrr_enabled) {
9296 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9297 		} else {
9298 			config.state = VRR_STATE_INACTIVE;
9299 		}
9300 	}
9301 out:
9302 	new_crtc_state->freesync_config = config;
9303 }
9304 
9305 static void reset_freesync_config_for_crtc(
9306 	struct dm_crtc_state *new_crtc_state)
9307 {
9308 	new_crtc_state->vrr_supported = false;
9309 
9310 	memset(&new_crtc_state->vrr_infopacket, 0,
9311 	       sizeof(new_crtc_state->vrr_infopacket));
9312 }
9313 
9314 static bool
9315 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9316 				 struct drm_crtc_state *new_crtc_state)
9317 {
9318 	struct drm_display_mode old_mode, new_mode;
9319 
9320 	if (!old_crtc_state || !new_crtc_state)
9321 		return false;
9322 
9323 	old_mode = old_crtc_state->mode;
9324 	new_mode = new_crtc_state->mode;
9325 
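	/*
	 * A freesync video mode switch is expected to change only the
	 * vertical front porch: pixel clock and horizontal timing must
	 * match, vtotal/vsync may differ, and the vsync pulse width
	 * (vsync_end - vsync_start) must stay the same.
	 */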
9326 	if (old_mode.clock       == new_mode.clock &&
9327 	    old_mode.hdisplay    == new_mode.hdisplay &&
9328 	    old_mode.vdisplay    == new_mode.vdisplay &&
9329 	    old_mode.htotal      == new_mode.htotal &&
9330 	    old_mode.vtotal      != new_mode.vtotal &&
9331 	    old_mode.hsync_start == new_mode.hsync_start &&
9332 	    old_mode.vsync_start != new_mode.vsync_start &&
9333 	    old_mode.hsync_end   == new_mode.hsync_end &&
9334 	    old_mode.vsync_end   != new_mode.vsync_end &&
9335 	    old_mode.hskew       == new_mode.hskew &&
9336 	    old_mode.vscan       == new_mode.vscan &&
9337 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9338 	    (new_mode.vsync_end - new_mode.vsync_start))
9339 		return true;
9340 
9341 	return false;
9342 }
9343 
9344 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9345 	uint64_t num, den, res;
9346 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9347 
9348 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9349 
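	/*
	 * Fixed refresh rate in micro-Hz: mode.clock is in kHz, so scale
	 * by 1000 (to Hz) and by 1000000 (to uHz), then divide by the
	 * total pixels per frame (htotal * vtotal).
	 */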
9350 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9351 	den = (unsigned long long)new_crtc_state->mode.htotal *
9352 	      (unsigned long long)new_crtc_state->mode.vtotal;
9353 
9354 	res = div_u64(num, den);
9355 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9356 }
9357 
9358 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9359 				struct drm_atomic_state *state,
9360 				struct drm_crtc *crtc,
9361 				struct drm_crtc_state *old_crtc_state,
9362 				struct drm_crtc_state *new_crtc_state,
9363 				bool enable,
9364 				bool *lock_and_validation_needed)
9365 {
9366 	struct dm_atomic_state *dm_state = NULL;
9367 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9368 	struct dc_stream_state *new_stream;
9369 	int ret = 0;
9370 
9371 	/*
9372 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9373 	 * update changed items
9374 	 */
9375 	struct amdgpu_crtc *acrtc = NULL;
9376 	struct amdgpu_dm_connector *aconnector = NULL;
9377 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9378 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9379 
9380 	new_stream = NULL;
9381 
9382 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9383 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9384 	acrtc = to_amdgpu_crtc(crtc);
9385 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9386 
9387 	/* TODO This hack should go away */
9388 	if (aconnector && enable) {
9389 		/* Make sure a fake sink is created in the plug-in scenario */
9390 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9391 							    &aconnector->base);
9392 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9393 							    &aconnector->base);
9394 
9395 		if (IS_ERR(drm_new_conn_state)) {
9396 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9397 			goto fail;
9398 		}
9399 
9400 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9401 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9402 
9403 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9404 			goto skip_modeset;
9405 
9406 		new_stream = create_validate_stream_for_sink(aconnector,
9407 							     &new_crtc_state->mode,
9408 							     dm_new_conn_state,
9409 							     dm_old_crtc_state->stream);
9410 
9411 		/*
9412 		 * We can have no stream on ACTION_SET if a display
9413 		 * was disconnected during S3; in this case it is not an
9414 		 * error. The OS will be updated after detection and
9415 		 * will do the right thing on the next atomic commit.
9416 		 */
9417 
9418 		if (!new_stream) {
9419 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9420 					__func__, acrtc->base.base.id);
9421 			ret = -ENOMEM;
9422 			goto fail;
9423 		}
9424 
9425 		/*
9426 		 * TODO: Check VSDB bits to decide whether this should
9427 		 * be enabled or not.
9428 		 */
9429 		new_stream->triggered_crtc_reset.enabled =
9430 			dm->force_timing_sync;
9431 
9432 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9433 
9434 		ret = fill_hdr_info_packet(drm_new_conn_state,
9435 					   &new_stream->hdr_static_metadata);
9436 		if (ret)
9437 			goto fail;
9438 
9439 		/*
9440 		 * If we already removed the old stream from the context
9441 		 * (and set the new stream to NULL) then we can't reuse
9442 		 * the old stream even if the stream and scaling are unchanged.
9443 		 * We'll hit the BUG_ON below and end up with a black screen.
9444 		 *
9445 		 * TODO: Refactor this function to allow this check to work
9446 		 * in all conditions.
9447 		 */
9448 		if (amdgpu_freesync_vid_mode &&
9449 		    dm_new_crtc_state->stream &&
9450 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9451 			goto skip_modeset;
9452 
9453 		if (dm_new_crtc_state->stream &&
9454 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9455 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9456 			new_crtc_state->mode_changed = false;
9457 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9458 					 new_crtc_state->mode_changed);
9459 		}
9460 	}
9461 
9462 	/* mode_changed flag may get updated above, need to check again */
9463 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9464 		goto skip_modeset;
9465 
9466 	DRM_DEBUG_ATOMIC(
9467 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9468 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9469 		"connectors_changed:%d\n",
9470 		acrtc->crtc_id,
9471 		new_crtc_state->enable,
9472 		new_crtc_state->active,
9473 		new_crtc_state->planes_changed,
9474 		new_crtc_state->mode_changed,
9475 		new_crtc_state->active_changed,
9476 		new_crtc_state->connectors_changed);
9477 
9478 	/* Remove stream for any changed/disabled CRTC */
9479 	if (!enable) {
9481 		if (!dm_old_crtc_state->stream)
9482 			goto skip_modeset;
9483 
9484 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9485 		    is_timing_unchanged_for_freesync(new_crtc_state,
9486 						     old_crtc_state)) {
9487 			new_crtc_state->mode_changed = false;
9488 			DRM_DEBUG_DRIVER(
9489 				"Mode change not required for front porch change, "
9490 				"setting mode_changed to %d",
9491 				new_crtc_state->mode_changed);
9492 
9493 			set_freesync_fixed_config(dm_new_crtc_state);
9494 
9495 			goto skip_modeset;
9496 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9497 			   is_freesync_video_mode(&new_crtc_state->mode,
9498 						  aconnector)) {
9499 			set_freesync_fixed_config(dm_new_crtc_state);
9500 		}
9501 
9502 		ret = dm_atomic_get_state(state, &dm_state);
9503 		if (ret)
9504 			goto fail;
9505 
9506 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9507 				crtc->base.id);
9508 
9509 		/* i.e. reset mode */
9510 		if (dc_remove_stream_from_ctx(
9511 				dm->dc,
9512 				dm_state->context,
9513 				dm_old_crtc_state->stream) != DC_OK) {
9514 			ret = -EINVAL;
9515 			goto fail;
9516 		}
9517 
9518 		dc_stream_release(dm_old_crtc_state->stream);
9519 		dm_new_crtc_state->stream = NULL;
9520 
9521 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9522 
9523 		*lock_and_validation_needed = true;
9524 
9525 	} else {/* Add stream for any updated/enabled CRTC */
9526 		/*
9527 		 * Quick fix to prevent a NULL pointer on new_stream when added MST
9528 		 * connectors are not found in the existing crtc_state in chained mode.
9529 		 * TODO: need to dig out the root cause of that
9530 		 */
9531 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9532 			goto skip_modeset;
9533 
9534 		if (modereset_required(new_crtc_state))
9535 			goto skip_modeset;
9536 
9537 		if (modeset_required(new_crtc_state, new_stream,
9538 				     dm_old_crtc_state->stream)) {
9540 			WARN_ON(dm_new_crtc_state->stream);
9541 
9542 			ret = dm_atomic_get_state(state, &dm_state);
9543 			if (ret)
9544 				goto fail;
9545 
9546 			dm_new_crtc_state->stream = new_stream;
9547 
9548 			dc_stream_retain(new_stream);
9549 
9550 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9551 					 crtc->base.id);
9552 
9553 			if (dc_add_stream_to_ctx(
9554 					dm->dc,
9555 					dm_state->context,
9556 					dm_new_crtc_state->stream) != DC_OK) {
9557 				ret = -EINVAL;
9558 				goto fail;
9559 			}
9560 
9561 			*lock_and_validation_needed = true;
9562 		}
9563 	}
9564 
9565 skip_modeset:
9566 	/* Release extra reference */
9567 	if (new_stream)
9568 		dc_stream_release(new_stream);
9569 
9570 	/*
9571 	 * We want to do dc stream updates that do not require a
9572 	 * full modeset below.
9573 	 */
9574 	if (!(enable && aconnector && new_crtc_state->active))
9575 		return 0;
9576 	/*
9577 	 * Given above conditions, the dc state cannot be NULL because:
9578 	 * 1. We're in the process of enabling CRTCs (the stream has just
9579 	 *    been added to the dc context, or is already on the context),
9580 	 * 2. the CRTC has a valid connector attached, and
9581 	 * 3. the CRTC is currently active and enabled.
9582 	 * => The dc stream state currently exists.
9583 	 */
9584 	BUG_ON(dm_new_crtc_state->stream == NULL);
9585 
9586 	/* Scaling or underscan settings */
9587 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9588 		update_stream_scaling_settings(
9589 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9590 
9591 	/* ABM settings */
9592 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9593 
9594 	/*
9595 	 * Color management settings. We also update color properties
9596 	 * when a modeset is needed, to ensure it gets reprogrammed.
9597 	 */
9598 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9599 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9600 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9601 		if (ret)
9602 			goto fail;
9603 	}
9604 
9605 	/* Update Freesync settings. */
9606 	get_freesync_config_for_crtc(dm_new_crtc_state,
9607 				     dm_new_conn_state);
9608 
9609 	return ret;
9610 
9611 fail:
9612 	if (new_stream)
9613 		dc_stream_release(new_stream);
9614 	return ret;
9615 }
9616 
9617 static bool should_reset_plane(struct drm_atomic_state *state,
9618 			       struct drm_plane *plane,
9619 			       struct drm_plane_state *old_plane_state,
9620 			       struct drm_plane_state *new_plane_state)
9621 {
9622 	struct drm_plane *other;
9623 	struct drm_plane_state *old_other_state, *new_other_state;
9624 	struct drm_crtc_state *new_crtc_state;
9625 	int i;
9626 
9627 	/*
9628 	 * TODO: Remove this hack once the checks below are sufficient
9629 	 * to determine when we need to reset all the planes on
9630 	 * the stream.
9631 	 */
9632 	if (state->allow_modeset)
9633 		return true;
9634 
9635 	/* Exit early if we know that we're adding or removing the plane. */
9636 	if (old_plane_state->crtc != new_plane_state->crtc)
9637 		return true;
9638 
9639 	/* old crtc == new_crtc == NULL, plane not in context. */
9640 	if (!new_plane_state->crtc)
9641 		return false;
9642 
9643 	new_crtc_state =
9644 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9645 
9646 	if (!new_crtc_state)
9647 		return true;
9648 
9649 	/* CRTC Degamma changes currently require us to recreate planes. */
9650 	if (new_crtc_state->color_mgmt_changed)
9651 		return true;
9652 
9653 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9654 		return true;
9655 
9656 	/*
9657 	 * If there are any new primary or overlay planes being added or
9658 	 * removed then the z-order can potentially change. To ensure
9659 	 * correct z-order and pipe acquisition the current DC architecture
9660 	 * requires us to remove and recreate all existing planes.
9661 	 *
9662 	 * TODO: Come up with a more elegant solution for this.
9663 	 */
9664 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9665 		struct amdgpu_framebuffer *old_afb, *new_afb;
9666 		if (other->type == DRM_PLANE_TYPE_CURSOR)
9667 			continue;
9668 
9669 		if (old_other_state->crtc != new_plane_state->crtc &&
9670 		    new_other_state->crtc != new_plane_state->crtc)
9671 			continue;
9672 
9673 		if (old_other_state->crtc != new_other_state->crtc)
9674 			return true;
9675 
9676 		/* Src/dst size and scaling updates. */
9677 		if (old_other_state->src_w != new_other_state->src_w ||
9678 		    old_other_state->src_h != new_other_state->src_h ||
9679 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9680 		    old_other_state->crtc_h != new_other_state->crtc_h)
9681 			return true;
9682 
9683 		/* Rotation / mirroring updates. */
9684 		if (old_other_state->rotation != new_other_state->rotation)
9685 			return true;
9686 
9687 		/* Blending updates. */
9688 		if (old_other_state->pixel_blend_mode !=
9689 		    new_other_state->pixel_blend_mode)
9690 			return true;
9691 
9692 		/* Alpha updates. */
9693 		if (old_other_state->alpha != new_other_state->alpha)
9694 			return true;
9695 
9696 		/* Colorspace changes. */
9697 		if (old_other_state->color_range != new_other_state->color_range ||
9698 		    old_other_state->color_encoding != new_other_state->color_encoding)
9699 			return true;
9700 
9701 		/* Framebuffer checks fall at the end. */
9702 		if (!old_other_state->fb || !new_other_state->fb)
9703 			continue;
9704 
9705 		/* Pixel format changes can require bandwidth updates. */
9706 		if (old_other_state->fb->format != new_other_state->fb->format)
9707 			return true;
9708 
9709 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9710 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9711 
9712 		/* Tiling and DCC changes also require bandwidth updates. */
9713 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9714 		    old_afb->base.modifier != new_afb->base.modifier)
9715 			return true;
9716 	}
9717 
9718 	return false;
9719 }
9720 
9721 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9722 			      struct drm_plane_state *new_plane_state,
9723 			      struct drm_framebuffer *fb)
9724 {
9725 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9726 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9727 	unsigned int pitch;
9728 	bool linear;
9729 
9730 	if (fb->width > new_acrtc->max_cursor_width ||
9731 	    fb->height > new_acrtc->max_cursor_height) {
9732 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9733 				 new_plane_state->fb->width,
9734 				 new_plane_state->fb->height);
9735 		return -EINVAL;
9736 	}
9737 	if (new_plane_state->src_w != fb->width << 16 ||
9738 	    new_plane_state->src_h != fb->height << 16) {
9739 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9740 		return -EINVAL;
9741 	}
9742 
9743 	/* Pitch in pixels */
9744 	pitch = fb->pitches[0] / fb->format->cpp[0];
9745 
9746 	if (fb->width != pitch) {
9747 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9748 				 fb->width, pitch);
9749 		return -EINVAL;
9750 	}
9751 
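	/* The cursor surface accepts only a few pitches; these presumably
	 * correspond to the supported cursor widths (64/128/256 px).
	 */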
9752 	switch (pitch) {
9753 	case 64:
9754 	case 128:
9755 	case 256:
9756 		/* FB pitch is supported by cursor plane */
9757 		break;
9758 	default:
9759 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9760 		return -EINVAL;
9761 	}
9762 
9763 	/* Core DRM takes care of checking FB modifiers, so we only need to
9764 	 * check tiling flags when the FB doesn't have a modifier. */
9765 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9766 		if (adev->family < AMDGPU_FAMILY_AI) {
9767 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9768 				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9769 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9770 		} else {
9771 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9772 		}
9773 		if (!linear) {
9774 			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9775 			return -EINVAL;
9776 		}
9777 	}
9778 
9779 	return 0;
9780 }
9781 
9782 static int dm_update_plane_state(struct dc *dc,
9783 				 struct drm_atomic_state *state,
9784 				 struct drm_plane *plane,
9785 				 struct drm_plane_state *old_plane_state,
9786 				 struct drm_plane_state *new_plane_state,
9787 				 bool enable,
9788 				 bool *lock_and_validation_needed)
9789 {
9791 	struct dm_atomic_state *dm_state = NULL;
9792 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9793 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9794 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9795 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9796 	struct amdgpu_crtc *new_acrtc;
9797 	bool needs_reset;
9798 	int ret = 0;
9799 
9801 	new_plane_crtc = new_plane_state->crtc;
9802 	old_plane_crtc = old_plane_state->crtc;
9803 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9804 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9805 
9806 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9807 		if (!enable || !new_plane_crtc ||
9808 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9809 			return 0;
9810 
9811 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9812 
9813 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9814 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9815 			return -EINVAL;
9816 		}
9817 
9818 		if (new_plane_state->fb) {
9819 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9820 						 new_plane_state->fb);
9821 			if (ret)
9822 				return ret;
9823 		}
9824 
9825 		return 0;
9826 	}
9827 
9828 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9829 					 new_plane_state);
9830 
9831 	/* Remove any changed/removed planes */
9832 	if (!enable) {
9833 		if (!needs_reset)
9834 			return 0;
9835 
9836 		if (!old_plane_crtc)
9837 			return 0;
9838 
9839 		old_crtc_state = drm_atomic_get_old_crtc_state(
9840 				state, old_plane_crtc);
9841 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9842 
9843 		if (!dm_old_crtc_state->stream)
9844 			return 0;
9845 
9846 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9847 				plane->base.id, old_plane_crtc->base.id);
9848 
9849 		ret = dm_atomic_get_state(state, &dm_state);
9850 		if (ret)
9851 			return ret;
9852 
9853 		if (!dc_remove_plane_from_context(
9854 				dc,
9855 				dm_old_crtc_state->stream,
9856 				dm_old_plane_state->dc_state,
9857 				dm_state->context)) {
9859 			return -EINVAL;
9860 		}
9861 
9863 		dc_plane_state_release(dm_old_plane_state->dc_state);
9864 		dm_new_plane_state->dc_state = NULL;
9865 
9866 		*lock_and_validation_needed = true;
9867 
9868 	} else { /* Add new planes */
9869 		struct dc_plane_state *dc_new_plane_state;
9870 
9871 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9872 			return 0;
9873 
9874 		if (!new_plane_crtc)
9875 			return 0;
9876 
9877 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9878 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9879 
9880 		if (!dm_new_crtc_state->stream)
9881 			return 0;
9882 
9883 		if (!needs_reset)
9884 			return 0;
9885 
9886 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9887 		if (ret)
9888 			return ret;
9889 
9890 		WARN_ON(dm_new_plane_state->dc_state);
9891 
9892 		dc_new_plane_state = dc_create_plane_state(dc);
9893 		if (!dc_new_plane_state)
9894 			return -ENOMEM;
9895 
9896 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9897 				 plane->base.id, new_plane_crtc->base.id);
9898 
9899 		ret = fill_dc_plane_attributes(
9900 			drm_to_adev(new_plane_crtc->dev),
9901 			dc_new_plane_state,
9902 			new_plane_state,
9903 			new_crtc_state);
9904 		if (ret) {
9905 			dc_plane_state_release(dc_new_plane_state);
9906 			return ret;
9907 		}
9908 
9909 		ret = dm_atomic_get_state(state, &dm_state);
9910 		if (ret) {
9911 			dc_plane_state_release(dc_new_plane_state);
9912 			return ret;
9913 		}
9914 
9915 		/*
9916 		 * Any atomic check errors that occur after this will
9917 		 * not need a release. The plane state will be attached
9918 		 * to the stream, and therefore part of the atomic
9919 		 * state. It'll be released when the atomic state is
9920 		 * cleaned.
9921 		 */
9922 		if (!dc_add_plane_to_context(
9923 				dc,
9924 				dm_new_crtc_state->stream,
9925 				dc_new_plane_state,
9926 				dm_state->context)) {
9927 
9928 			dc_plane_state_release(dc_new_plane_state);
9929 			return -EINVAL;
9930 		}
9931 
9932 		dm_new_plane_state->dc_state = dc_new_plane_state;
9933 
9934 		/* Tell DC to do a full surface update every time there
9935 		 * is a plane change. Inefficient, but works for now.
9936 		 */
9937 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9938 
9939 		*lock_and_validation_needed = true;
9940 	}
9941 
9943 	return ret;
9944 }
9945 
9946 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9947 				struct drm_crtc *crtc,
9948 				struct drm_crtc_state *new_crtc_state)
9949 {
9950 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9951 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9952 
9953 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9954 	 * cursor per pipe but it's going to inherit the scaling and
9955 	 * positioning from the underlying pipe. Check that the cursor plane's
9956 	 * blending properties match the primary plane's. */
9957 
9958 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9959 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9960 	if (!new_cursor_state || !new_primary_state ||
9961 	    !new_cursor_state->fb || !new_primary_state->fb) {
9962 		return 0;
9963 	}
9964 
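	/* src_w/src_h are 16.16 fixed point; compare the scale factors in
	 * thousandths so no floating point is needed.
	 */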
9965 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9966 			 (new_cursor_state->src_w >> 16);
9967 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9968 			 (new_cursor_state->src_h >> 16);
9969 
9970 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9971 			 (new_primary_state->src_w >> 16);
9972 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9973 			 (new_primary_state->src_h >> 16);
9974 
9975 	if (cursor_scale_w != primary_scale_w ||
9976 	    cursor_scale_h != primary_scale_h) {
9977 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9978 		return -EINVAL;
9979 	}
9980 
9981 	return 0;
9982 }
9983 
9984 #if defined(CONFIG_DRM_AMD_DC_DCN)
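/*
 * Find the first MST connector driven by @crtc and add every CRTC sharing
 * DSC resources on that MST topology to the atomic state.
 */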
9985 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9986 {
9987 	struct drm_connector *connector;
9988 	struct drm_connector_state *conn_state;
9989 	struct amdgpu_dm_connector *aconnector = NULL;
9990 	int i;
9991 	for_each_new_connector_in_state(state, connector, conn_state, i) {
9992 		if (conn_state->crtc != crtc)
9993 			continue;
9994 
9995 		aconnector = to_amdgpu_dm_connector(connector);
9996 		if (!aconnector->port || !aconnector->mst_port)
9997 			aconnector = NULL;
9998 		else
9999 			break;
10000 	}
10001 
10002 	if (!aconnector)
10003 		return 0;
10004 
10005 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10006 }
10007 #endif
10008 
10009 static int validate_overlay(struct drm_atomic_state *state)
10010 {
10011 	int i;
10012 	struct drm_plane *plane;
10013 	struct drm_plane_state *old_plane_state, *new_plane_state;
10014 	struct drm_plane_state *primary_state, *overlay_state = NULL;
10015 
10016 	/* Check if primary plane is contained inside overlay */
10017 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10018 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10019 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10020 				return 0;
10021 
10022 			overlay_state = new_plane_state;
10023 			continue;
10024 		}
10025 	}
10026 
10027 	/* check if we're making changes to the overlay plane */
10028 	if (!overlay_state)
10029 		return 0;
10030 
10031 	/* check if overlay plane is enabled */
10032 	if (!overlay_state->crtc)
10033 		return 0;
10034 
10035 	/* find the primary plane for the CRTC that the overlay is enabled on */
10036 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10037 	if (IS_ERR(primary_state))
10038 		return PTR_ERR(primary_state);
10039 
10040 	/* check if primary plane is enabled */
10041 	if (!primary_state->crtc)
10042 		return 0;
10043 
10044 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10045 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10046 	    primary_state->crtc_y < overlay_state->crtc_y ||
10047 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10048 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10049 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10050 		return -EINVAL;
10051 	}
10052 
10053 	return 0;
10054 }
10055 
10056 /**
10057  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10058  * @dev: The DRM device
10059  * @state: The atomic state to commit
10060  *
10061  * Validate that the given atomic state is programmable by DC into hardware.
10062  * This involves constructing a &struct dc_state reflecting the new hardware
10063  * state we wish to commit, then querying DC to see if it is programmable. It's
10064  * important not to modify the existing DC state. Otherwise, atomic_check
10065  * may unexpectedly commit hardware changes.
10066  *
10067  * When validating the DC state, it's important that the right locks are
10068  * acquired. For the full-update case, which removes/adds/updates streams
10069  * on one CRTC while flipping on another, acquiring the global lock
10070  * guarantees that any such commit will wait for completion of any
10071  * outstanding flip using DRM's synchronization events.
10072  *
10073  * Note that DM adds the affected connectors for all CRTCs in state, even
10074  * when that might not seem necessary. This is because DC stream creation
10075  * requires the DC sink, which is tied to the DRM connector state. Cleaning
10076  * this up should be possible but is non-trivial - a possible TODO item.
10077  *
10078  * Return: 0 on success or a negative error code on validation failure.
10079  */
10080 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10081 				  struct drm_atomic_state *state)
10082 {
10083 	struct amdgpu_device *adev = drm_to_adev(dev);
10084 	struct dm_atomic_state *dm_state = NULL;
10085 	struct dc *dc = adev->dm.dc;
10086 	struct drm_connector *connector;
10087 	struct drm_connector_state *old_con_state, *new_con_state;
10088 	struct drm_crtc *crtc;
10089 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10090 	struct drm_plane *plane;
10091 	struct drm_plane_state *old_plane_state, *new_plane_state;
10092 	enum dc_status status;
10093 	int ret, i;
10094 	bool lock_and_validation_needed = false;
10095 	struct dm_crtc_state *dm_old_crtc_state;
10096 
10097 	trace_amdgpu_dm_atomic_check_begin(state);
10098 
10099 	ret = drm_atomic_helper_check_modeset(dev, state);
10100 	if (ret)
10101 		goto fail;
10102 
10103 	/* Check connector changes */
10104 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10105 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10106 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10107 
10108 		/* Skip connectors that are disabled or part of modeset already. */
10109 		if (!old_con_state->crtc && !new_con_state->crtc)
10110 			continue;
10111 
10112 		if (!new_con_state->crtc)
10113 			continue;
10114 
10115 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10116 		if (IS_ERR(new_crtc_state)) {
10117 			ret = PTR_ERR(new_crtc_state);
10118 			goto fail;
10119 		}
10120 
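		/*
		 * A changed ABM (Adaptive Backlight Management) level must be
		 * re-programmed into the stream, so treat it as a connector
		 * change.
		 */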
10121 		if (dm_old_con_state->abm_level !=
10122 		    dm_new_con_state->abm_level)
10123 			new_crtc_state->connectors_changed = true;
10124 	}
10125 
10126 #if defined(CONFIG_DRM_AMD_DC_DCN)
10127 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10128 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10129 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10130 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10131 				if (ret)
10132 					goto fail;
10133 			}
10134 		}
10135 	}
10136 #endif
10137 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10138 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10139 
10140 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10141 		    !new_crtc_state->color_mgmt_changed &&
10142 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10143 		    !dm_old_crtc_state->dsc_force_changed)
10144 			continue;
10145 
10146 		if (!new_crtc_state->enable)
10147 			continue;
10148 
10149 		ret = drm_atomic_add_affected_connectors(state, crtc);
10150 		if (ret)
10151 			goto fail;
10152 
10153 		ret = drm_atomic_add_affected_planes(state, crtc);
10154 		if (ret)
10155 			goto fail;
10156 
10157 		if (dm_old_crtc_state->dsc_force_changed)
10158 			new_crtc_state->mode_changed = true;
10159 	}
10160 
10161 	/*
10162 	 * Add all primary and overlay planes on the CRTC to the state
10163 	 * whenever a plane is enabled to maintain correct z-ordering
10164 	 * and to enable fast surface updates.
10165 	 */
10166 	drm_for_each_crtc(crtc, dev) {
10167 		bool modified = false;
10168 
10169 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10170 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10171 				continue;
10172 
10173 			if (new_plane_state->crtc == crtc ||
10174 			    old_plane_state->crtc == crtc) {
10175 				modified = true;
10176 				break;
10177 			}
10178 		}
10179 
10180 		if (!modified)
10181 			continue;
10182 
10183 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10184 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10185 				continue;
10186 
10187 			new_plane_state =
10188 				drm_atomic_get_plane_state(state, plane);
10189 
10190 			if (IS_ERR(new_plane_state)) {
10191 				ret = PTR_ERR(new_plane_state);
10192 				goto fail;
10193 			}
10194 		}
10195 	}
10196 
10197 	/* Remove existing planes if they are modified */
10198 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10199 		ret = dm_update_plane_state(dc, state, plane,
10200 					    old_plane_state,
10201 					    new_plane_state,
10202 					    false,
10203 					    &lock_and_validation_needed);
10204 		if (ret)
10205 			goto fail;
10206 	}
10207 
10208 	/* Disable all crtcs which require disable */
10209 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10210 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10211 					   old_crtc_state,
10212 					   new_crtc_state,
10213 					   false,
10214 					   &lock_and_validation_needed);
10215 		if (ret)
10216 			goto fail;
10217 	}
10218 
10219 	/* Enable all crtcs which require enable */
10220 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10221 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10222 					   old_crtc_state,
10223 					   new_crtc_state,
10224 					   true,
10225 					   &lock_and_validation_needed);
10226 		if (ret)
10227 			goto fail;
10228 	}
10229 
10230 	ret = validate_overlay(state);
10231 	if (ret)
10232 		goto fail;
10233 
10234 	/* Add new/modified planes */
10235 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10236 		ret = dm_update_plane_state(dc, state, plane,
10237 					    old_plane_state,
10238 					    new_plane_state,
10239 					    true,
10240 					    &lock_and_validation_needed);
10241 		if (ret)
10242 			goto fail;
10243 	}
10244 
10245 	/* Run this here since we want to validate the streams we created */
10246 	ret = drm_atomic_helper_check_planes(dev, state);
10247 	if (ret)
10248 		goto fail;
10249 
10250 	/* Check the scaling of the cursor planes */
10251 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10252 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10253 		if (ret)
10254 			goto fail;
10255 	}
10256 
10257 	if (state->legacy_cursor_update) {
10258 		/*
10259 		 * This is a fast cursor update coming from the plane update
10260 		 * helper, check if it can be done asynchronously for better
10261 		 * performance.
10262 		 */
10263 		state->async_update =
10264 			!drm_atomic_helper_async_check(dev, state);
10265 
10266 		/*
10267 		 * Skip the remaining global validation if this is an async
10268 		 * update. Cursor updates can be done without affecting
10269 		 * state or bandwidth calcs and this avoids the performance
10270 		 * penalty of locking the private state object and
10271 		 * allocating a new dc_state.
10272 		 */
10273 		if (state->async_update)
10274 			return 0;
10275 	}
10276 
10277 	/* Check scaling and underscan changes */
10278 	/* TODO: Scaling-change validation was removed because a new stream
10279 	 * cannot be committed into the context without causing a full reset.
10280 	 * Need to decide how to handle this.
10281 	 */
10282 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10283 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10284 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10285 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10286 
10287 		/* Skip any modesets/resets */
10288 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10289 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10290 			continue;
10291 
10292 		/* Skip anything that is not a scaling or underscan change */
10293 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10294 			continue;
10295 
10296 		lock_and_validation_needed = true;
10297 	}
10298 
10299 	/*
10300 	 * Streams and planes are reset when there are changes that affect
10301 	 * bandwidth. Anything that affects bandwidth needs to go through
10302 	 * DC global validation to ensure that the configuration can be applied
10303 	 * to hardware.
10304 	 *
10305 	 * We currently have to stall out here in atomic_check for outstanding
10306 	 * commits to finish in this case because our IRQ handlers reference
10307 	 * DRM state directly - we can end up disabling interrupts too early
10308 	 * if we don't.
10309 	 *
10310 	 * TODO: Remove this stall and drop DM state private objects.
10311 	 */
10312 	if (lock_and_validation_needed) {
10313 		ret = dm_atomic_get_state(state, &dm_state);
10314 		if (ret)
10315 			goto fail;
10316 
10317 		ret = do_aquire_global_lock(dev, state);
10318 		if (ret)
10319 			goto fail;
10320 
10321 #if defined(CONFIG_DRM_AMD_DC_DCN)
10322 		ret = -EINVAL;
10323 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10324 			goto fail;
10325 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10326 		if (ret)
10327 			goto fail;
10328 #endif
10329 
10330 		/*
10331 		 * Perform validation of MST topology in the state:
10332 		 * We need to perform MST atomic check before calling
10333 		 * dc_validate_global_state(), or there is a chance
10334 		 * to get stuck in an infinite loop and hang eventually.
10335 		 */
10336 		ret = drm_dp_mst_atomic_check(state);
10337 		if (ret)
10338 			goto fail;
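		/* Run full DC validation on the new context (fast_validate == false). */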
10339 		status = dc_validate_global_state(dc, dm_state->context, false);
10340 		if (status != DC_OK) {
10341 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10342 				       dc_status_to_str(status), status);
10343 			ret = -EINVAL;
10344 			goto fail;
10345 		}
10346 	} else {
10347 		/*
10348 		 * The commit is a fast update. Fast updates shouldn't change
10349 		 * the DC context, affect global validation, and can have their
10350 		 * commit work done in parallel with other commits not touching
10351 		 * the same resource. If we have a new DC context as part of
10352 		 * the DM atomic state from validation we need to free it and
10353 		 * retain the existing one instead.
10354 		 *
10355 		 * Furthermore, since the DM atomic state only contains the DC
10356 		 * context and can safely be annulled, we can free the state
10357 		 * and clear the associated private object now to free
10358 		 * some memory and avoid a possible use-after-free later.
10359 		 */
10360 
10361 		for (i = 0; i < state->num_private_objs; i++) {
10362 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10363 
10364 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
10366 
10367 				dm_atomic_destroy_state(obj,
10368 						state->private_objs[i].state);
10369 
10370 				/* If i is not at the end of the array then the
10371 				 * last element needs to be moved to where i was
10372 				 * before the array can safely be truncated.
10373 				 */
10374 				if (i != j)
10375 					state->private_objs[i] =
10376 						state->private_objs[j];
10377 
10378 				state->private_objs[j].ptr = NULL;
10379 				state->private_objs[j].state = NULL;
10380 				state->private_objs[j].old_state = NULL;
10381 				state->private_objs[j].new_state = NULL;
10382 
10383 				state->num_private_objs = j;
10384 				break;
10385 			}
10386 		}
10387 	}
10388 
10389 	/* Store the overall update type for use later in atomic check. */
10390 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10391 		struct dm_crtc_state *dm_new_crtc_state =
10392 			to_dm_crtc_state(new_crtc_state);
10393 
10394 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10395 							 UPDATE_TYPE_FULL :
10396 							 UPDATE_TYPE_FAST;
10397 	}
10398 
10399 	/* Must be success: ret is still 0 at this point */
10400 	WARN_ON(ret);
10401 
10402 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10403 
10404 	return ret;
10405 
10406 fail:
10407 	if (ret == -EDEADLK)
10408 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10409 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10410 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10411 	else
10412 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10413 
10414 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10415 
10416 	return ret;
10417 }
10418 
10419 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10420 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10421 {
10422 	uint8_t dpcd_data;
10423 	bool capable = false;
10424 
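	/*
	 * DP_DOWN_STREAM_PORT_COUNT (DPCD 0x0007) carries the
	 * MSA_TIMING_PAR_IGNORED bit: a sink that sets it can ignore the MSA
	 * timing parameters, which is a prerequisite for variable refresh
	 * over DP.
	 */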
10425 	if (amdgpu_dm_connector->dc_link &&
10426 		dm_helpers_dp_read_dpcd(
10427 				NULL,
10428 				amdgpu_dm_connector->dc_link,
10429 				DP_DOWN_STREAM_PORT_COUNT,
10430 				&dpcd_data,
10431 				sizeof(dpcd_data))) {
10432 		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
10433 	}
10434 
10435 	return capable;
10436 }
10437 
10438 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10439 		uint8_t *edid_ext, int len,
10440 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10441 {
10442 	int i;
10443 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10444 	struct dc *dc = adev->dm.dc;
10445 
10446 	/* send extension block to DMCU for parsing */
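	/* With len == EDID_LENGTH (128) this takes 16 eight-byte round trips. */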
10447 	for (i = 0; i < len; i += 8) {
10448 		bool res;
10449 		int offset;
10450 
10451 		/* send 8 bytes at a time */
10452 		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10453 			return false;
10454 
10455 		if (i + 8 == len) {
10456 			/* the whole EDID block has been sent; expect a result */
10457 			int version, min_rate, max_rate;
10458 
10459 			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10460 			if (res) {
10461 				/* amd vsdb found */
10462 				vsdb_info->freesync_supported = 1;
10463 				vsdb_info->amd_vsdb_version = version;
10464 				vsdb_info->min_refresh_rate_hz = min_rate;
10465 				vsdb_info->max_refresh_rate_hz = max_rate;
10466 				return true;
10467 			}
10468 			/* not amd vsdb */
10469 			return false;
10470 		}
10471 
10472 		/* check for ack */
10473 		res = dc_edid_parser_recv_cea_ack(dc, &offset);
10474 		if (!res)
10475 			return false;
10476 	}
10477 
10478 	return false;
10479 }
10480 
10481 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10482 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10483 {
10484 	uint8_t *edid_ext = NULL;
10485 	int i;
10486 	bool valid_vsdb_found = false;
10487 
10488 	/*----- drm_find_cea_extension() -----*/
10489 	/* No EDID or EDID extensions */
10490 	if (edid == NULL || edid->extensions == 0)
10491 		return -ENODEV;
10492 
10493 	/* Find CEA extension */
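	/* Extension blocks are EDID_LENGTH (128) bytes each, following the base block */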
10494 	for (i = 0; i < edid->extensions; i++) {
10495 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10496 		if (edid_ext[0] == CEA_EXT)
10497 			break;
10498 	}
10499 
10500 	if (i == edid->extensions)
10501 		return -ENODEV;
10502 
10503 	/*----- cea_db_offsets() -----*/
10504 	if (edid_ext[0] != CEA_EXT)
10505 		return -ENODEV;
10506 
10507 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10508 
10509 	return valid_vsdb_found ? i : -ENODEV;
10510 }
10511 
10512 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10513 					struct edid *edid)
10514 {
10515 	int i = 0;
10516 	struct detailed_timing *timing;
10517 	struct detailed_non_pixel *data;
10518 	struct detailed_data_monitor_range *range;
10519 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10520 			to_amdgpu_dm_connector(connector);
10521 	struct dm_connector_state *dm_con_state = NULL;
10522 
10523 	struct drm_device *dev = connector->dev;
10524 	struct amdgpu_device *adev = drm_to_adev(dev);
10525 	bool freesync_capable = false;
10526 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10527 
10528 	if (!connector->state) {
10529 		DRM_ERROR("%s - Connector has no state\n", __func__);
10530 		goto update;
10531 	}
10532 
10533 	if (!edid) {
10534 		dm_con_state = to_dm_connector_state(connector->state);
10535 
10536 		amdgpu_dm_connector->min_vfreq = 0;
10537 		amdgpu_dm_connector->max_vfreq = 0;
10538 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10539 
10540 		goto update;
10541 	}
10542 
10543 	dm_con_state = to_dm_connector_state(connector->state);
10544 
10545 	if (!amdgpu_dm_connector->dc_sink) {
10546 		DRM_ERROR("dc_sink NULL, could not set up the FreeSync module.\n");
10547 		goto update;
10548 	}
10549 	if (!adev->dm.freesync_module)
10550 		goto update;
10551 
10553 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10554 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10555 		bool edid_check_required = false;
10556 
10557 		if (edid) {
10558 			edid_check_required = is_dp_capable_without_timing_msa(
10559 						adev->dm.dc,
10560 						amdgpu_dm_connector);
10561 		}
10562 
10563 		if (edid_check_required && (edid->version > 1 ||
10564 		   (edid->version == 1 && edid->revision > 1))) {
10565 			for (i = 0; i < 4; i++) {
10566 
10567 				timing	= &edid->detailed_timings[i];
10568 				data	= &timing->data.other_data;
10569 				range	= &data->data.range;
10570 				/*
10571 				 * Check if monitor has continuous frequency mode
10572 				 */
10573 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10574 					continue;
10575 				/*
10576 				 * Check for the range-limits-only flag: if flags == 1,
10577 				 * no additional timing information is provided.
10578 				 * Default GTF, GTF secondary curve and CVT are not
10579 				 * supported.
10580 				 */
10581 				if (range->flags != 1)
10582 					continue;
10583 
10584 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10585 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10586 				amdgpu_dm_connector->pixel_clock_mhz =
10587 					range->pixel_clock_mhz * 10;
10588 
10589 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10590 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10591 
10592 				break;
10593 			}
10594 
10595 			if (amdgpu_dm_connector->max_vfreq -
10596 			    amdgpu_dm_connector->min_vfreq > 10) {
10598 				freesync_capable = true;
10599 			}
10600 		}
10601 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10602 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10603 		if (i >= 0 && vsdb_info.freesync_supported) {
10604 			timing  = &edid->detailed_timings[i];
10605 			data    = &timing->data.other_data;
10606 
10607 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10608 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10609 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10610 				freesync_capable = true;
10611 
10612 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10613 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10614 		}
10615 	}
10616 
10617 update:
10618 	if (dm_con_state)
10619 		dm_con_state->freesync_capable = freesync_capable;
10620 
10621 	if (connector->vrr_capable_property)
10622 		drm_connector_set_vrr_capable_property(connector,
10623 						       freesync_capable);
10624 }
10625 
10626 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10627 {
10628 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10629 
10630 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10631 		return;
10632 	if (link->type == dc_connection_none)
10633 		return;
10634 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10635 					dpcd_data, sizeof(dpcd_data))) {
10636 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10637 
10638 		if (dpcd_data[0] == 0) {
10639 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10640 			link->psr_settings.psr_feature_enabled = false;
10641 		} else {
10642 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
10643 			link->psr_settings.psr_feature_enabled = true;
10644 		}
10645 
10646 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10647 	}
10648 }
10649 
10650 /*
10651  * amdgpu_dm_link_setup_psr() - configure psr link
10652  * @stream: stream state
10653  *
10654  * Return: true if success
10655  * Return: true on success
10656 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10657 {
10658 	struct dc_link *link = NULL;
10659 	struct psr_config psr_config = {0};
10660 	struct psr_context psr_context = {0};
10661 	bool ret = false;
10662 
10663 	if (stream == NULL)
10664 		return false;
10665 
10666 	link = stream->link;
10667 
10668 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10669 
10670 	if (psr_config.psr_version > 0) {
10671 		psr_config.psr_exit_link_training_required = 0x1;
10672 		psr_config.psr_frame_capture_indication_req = 0;
10673 		psr_config.psr_rfb_setup_time = 0x37;
10674 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10675 		psr_config.allow_smu_optimizations = 0x0;
10676 
10677 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10678 
10679 	}
10680 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10681 
10682 	return ret;
10683 }
10684 
10685 /*
10686  * amdgpu_dm_psr_enable() - enable psr f/w
10687  * @stream: stream state
10688  *
10689  * Return: true if success
10690  * Return: true on success
10691 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10692 {
10693 	struct dc_link *link = stream->link;
10694 	unsigned int vsync_rate_hz = 0;
10695 	struct dc_static_screen_params params = {0};
10696 	/* Calculate number of static frames before generating interrupt to
10697 	 * enter PSR.
10698 	 */
10699 	/* Init with a fail-safe of 2 static frames */
10700 	unsigned int num_frames_static = 2;
10701 
10702 	DRM_DEBUG_DRIVER("Enabling psr...\n");
10703 
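	/* refresh rate = pixel clock / (h_total * v_total); pix_clk_100hz is in units of 100 Hz */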
10704 	vsync_rate_hz = div64_u64(div64_u64(
10705 			stream->timing.pix_clk_100hz * 100,
10706 			stream->timing.v_total),
10707 			stream->timing.h_total);
10708 
10709 	/* Round up:
10710 	 * calculate the number of frames such that at least 30 ms of time
10711 	 * has passed.
10712 	 */
10713 	if (vsync_rate_hz != 0) {
10714 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10715 		num_frames_static = (30000 / frame_time_microsec) + 1;
10716 	}
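	/* E.g. at 60 Hz: frame_time = 16666 us, so 30000 / 16666 + 1 = 2 frames (~33 ms) */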
10717 
10718 	params.triggers.cursor_update = true;
10719 	params.triggers.overlay_update = true;
10720 	params.triggers.surface_update = true;
10721 	params.num_frames = num_frames_static;
10722 
10723 	dc_stream_set_static_screen_params(link->ctx->dc,
10724 					   &stream, 1,
10725 					   &params);
10726 
10727 	return dc_link_set_psr_allow_active(link, true, false, false);
10728 }
10729 
10730 /*
10731  * amdgpu_dm_psr_disable() - disable psr f/w
10732  * @stream:  stream state
10733  *
10734  * Return: true if success
10735  * Return: true on success
10736 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10737 {
10739 	DRM_DEBUG_DRIVER("Disabling psr...\n");
10740 
10741 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
10742 }
10743 
10744 /*
10745  * amdgpu_dm_psr_disable_all() - disable psr f/w for all streams
10746  * on which psr is currently enabled
10747  *
10748  * Return: true on success
10749  */
10750 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10751 {
10752 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10753 	return dc_set_psr_allow_active(dm->dc, false);
10754 }
10755 
10756 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10757 {
10758 	struct amdgpu_device *adev = drm_to_adev(dev);
10759 	struct dc *dc = adev->dm.dc;
10760 	int i;
10761 
10762 	mutex_lock(&adev->dm.dc_lock);
10763 	if (dc->current_state) {
10764 		for (i = 0; i < dc->current_state->stream_count; ++i)
10765 			dc->current_state->streams[i]
10766 				->triggered_crtc_reset.enabled =
10767 				adev->dm.force_timing_sync;
10768 
10769 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10770 		dc_trigger_sync(dc, dc->current_state);
10771 	}
10772 	mutex_unlock(&adev->dm.dc_lock);
10773 }
10774 
10775 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10776 		       uint32_t value, const char *func_name)
10777 {
10778 #ifdef DM_CHECK_ADDR_0
10779 	if (address == 0) {
10780 		DC_ERR("invalid register write; address = 0\n");
10781 		return;
10782 	}
10783 #endif
10784 	cgs_write_register(ctx->cgs_device, address, value);
10785 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10786 }
10787 
10788 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10789 			  const char *func_name)
10790 {
10791 	uint32_t value;
10792 #ifdef DM_CHECK_ADDR_0
10793 	if (address == 0) {
10794 		DC_ERR("invalid register read; address = 0\n");
10795 		return 0;
10796 	}
10797 #endif
10798 
10799 	if (ctx->dmub_srv &&
10800 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10801 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10802 		ASSERT(false);
10803 		return 0;
10804 	}
10805 
10806 	value = cgs_read_register(ctx->cgs_device, address);
10807 
10808 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10809 
10810 	return value;
10811 }
10812 
10813 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10814 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
10815 {
10816 	struct amdgpu_device *adev = ctx->driver_context;
10817 	int ret = 0;
10818 
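	/*
	 * Fire the request off to the DMUB and wait (interruptibly, for up
	 * to 10 * HZ jiffies == 10 seconds) for the notify handler to signal
	 * completion.
	 */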
10819 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10820 	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10821 	if (ret == 0) {
10822 		*operation_result = AUX_RET_ERROR_TIMEOUT;
10823 		return -1;
10824 	}
10825 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10826 
10827 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10828 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10829 
10830 		/* For the read case, copy the data into the payload */
10831 		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10832 		(*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10833 			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10834 			adev->dm.dmub_notify->aux_reply.length);
10835 	}
10836 
10837 	return adev->dm.dmub_notify->aux_reply.length;
10838 }
10839