xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 9c39c6ffe0c2945c7cf814814c096bc23b63f53d)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "amdgpu_dm_trace.h"
39 
40 #include "vid.h"
41 #include "amdgpu.h"
42 #include "amdgpu_display.h"
43 #include "amdgpu_ucode.h"
44 #include "atom.h"
45 #include "amdgpu_dm.h"
46 #ifdef CONFIG_DRM_AMD_DC_HDCP
47 #include "amdgpu_dm_hdcp.h"
48 #include <drm/drm_hdcp.h>
49 #endif
50 #include "amdgpu_pm.h"
51 
52 #include "amd_shared.h"
53 #include "amdgpu_dm_irq.h"
54 #include "dm_helpers.h"
55 #include "amdgpu_dm_mst_types.h"
56 #if defined(CONFIG_DEBUG_FS)
57 #include "amdgpu_dm_debugfs.h"
58 #endif
59 
60 #include "ivsrcid/ivsrcid_vislands30.h"
61 
62 #include <linux/module.h>
63 #include <linux/moduleparam.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
69 
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107 
108 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110 
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113 
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116 
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
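/*
 * dm_dmub_sw_init() and dm_dmub_hw_init() below use these sizes to slice
 * the DMCUB firmware image, which wraps its inst_const section in a PSP
 * header and footer, before copying it into the DMUB framebuffer windows.
 */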
119 
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
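/*
 * Illustrative request flow (simplified): a DRM atomic commit enters
 * amdgpu_dm_atomic_check() and amdgpu_dm_atomic_commit_tail() below,
 * where dm translates the new drm_atomic_state into DC stream/plane
 * state and hands it to DC, which programs the hardware.
 */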
129 
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
134 
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 {
137 	switch (link->dpcd_caps.dongle_type) {
138 	case DISPLAY_DONGLE_NONE:
139 		return DRM_MODE_SUBCONNECTOR_Native;
140 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141 		return DRM_MODE_SUBCONNECTOR_VGA;
142 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
144 		return DRM_MODE_SUBCONNECTOR_DVID;
145 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147 		return DRM_MODE_SUBCONNECTOR_HDMIA;
148 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149 	default:
150 		return DRM_MODE_SUBCONNECTOR_Unknown;
151 	}
152 }
153 
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 {
156 	struct dc_link *link = aconnector->dc_link;
157 	struct drm_connector *connector = &aconnector->base;
158 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159 
160 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
161 		return;
162 
163 	if (aconnector->dc_sink)
164 		subconnector = get_subconnector_type(link);
165 
166 	drm_object_property_set_value(&connector->base,
167 			connector->dev->mode_config.dp_subconnector_property,
168 			subconnector);
169 }
170 
171 /*
172  * Initializes drm_device display related structures, based on the information
173  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
174  * drm_encoder, drm_mode_config
175  *
176  * Returns 0 on success
177  */
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181 
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183 				struct drm_plane *plane,
184 				unsigned long possible_crtcs,
185 				const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187 			       struct drm_plane *plane,
188 			       uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
191 				    uint32_t link_index,
192 				    struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194 				  struct amdgpu_encoder *aencoder,
195 				  uint32_t link_index);
196 
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198 
199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200 
201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
202 				  struct drm_atomic_state *state);
203 
204 static void handle_cursor_update(struct drm_plane *plane,
205 				 struct drm_plane_state *old_plane_state);
206 
207 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
208 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
209 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
212 
213 static const struct drm_format_info *
214 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
215 
216 static bool
217 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
218 				 struct drm_crtc_state *new_crtc_state);
219 /*
220  * dm_vblank_get_counter
221  *
222  * @brief
223  * Get counter for number of vertical blanks
224  *
225  * @param
226  * struct amdgpu_device *adev - [in] desired amdgpu device
227  * int crtc - [in] which CRTC to get the counter from
228  *
229  * @return
230  * Counter for vertical blanks
231  */
232 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
233 {
234 	if (crtc >= adev->mode_info.num_crtc)
235 		return 0;
236 	else {
237 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
238 
239 		if (acrtc->dm_irq_params.stream == NULL) {
240 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
241 				  crtc);
242 			return 0;
243 		}
244 
245 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
246 	}
247 }
248 
249 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
250 				  u32 *vbl, u32 *position)
251 {
252 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
253 
254 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
255 		return -EINVAL;
256 	else {
257 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
258 
259 		if (acrtc->dm_irq_params.stream == NULL) {
260 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
261 				  crtc);
262 			return 0;
263 		}
264 
265 		/*
266 		 * TODO rework base driver to use values directly.
267 		 * for now parse it back into reg-format
268 		 */
269 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
270 					 &v_blank_start,
271 					 &v_blank_end,
272 					 &h_position,
273 					 &v_position);
274 
275 		*position = v_position | (h_position << 16);
276 		*vbl = v_blank_start | (v_blank_end << 16);
277 	}
278 
279 	return 0;
280 }
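/*
 * For reference, the packed reg-format values above decode as:
 *
 *	vpos          = *position & 0xffff;
 *	hpos          = *position >> 16;
 *	v_blank_start = *vbl & 0xffff;
 *	v_blank_end   = *vbl >> 16;
 */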
281 
282 static bool dm_is_idle(void *handle)
283 {
284 	/* XXX todo */
285 	return true;
286 }
287 
288 static int dm_wait_for_idle(void *handle)
289 {
290 	/* XXX todo */
291 	return 0;
292 }
293 
294 static bool dm_check_soft_reset(void *handle)
295 {
296 	return false;
297 }
298 
299 static int dm_soft_reset(void *handle)
300 {
301 	/* XXX todo */
302 	return 0;
303 }
304 
305 static struct amdgpu_crtc *
306 get_crtc_by_otg_inst(struct amdgpu_device *adev,
307 		     int otg_inst)
308 {
309 	struct drm_device *dev = adev_to_drm(adev);
310 	struct drm_crtc *crtc;
311 	struct amdgpu_crtc *amdgpu_crtc;
312 
313 	if (otg_inst == -1) {
314 		WARN_ON(1);
315 		return adev->mode_info.crtcs[0];
316 	}
317 
318 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 		amdgpu_crtc = to_amdgpu_crtc(crtc);
320 
321 		if (amdgpu_crtc->otg_inst == otg_inst)
322 			return amdgpu_crtc;
323 	}
324 
325 	return NULL;
326 }
327 
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329 {
330 	return acrtc->dm_irq_params.freesync_config.state ==
331 		       VRR_STATE_ACTIVE_VARIABLE ||
332 	       acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_FIXED;
334 }
335 
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337 {
338 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 }
341 
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 					      struct dm_crtc_state *new_state)
344 {
345 	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
346 		return true;
347 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348 		return true;
349 	else
350 		return false;
351 }
352 
353 /**
354  * dm_pflip_high_irq() - Handle pageflip interrupt
355  * @interrupt_params: interrupt parameters
356  *
357  * Handles the pageflip interrupt by notifying all interested parties
358  * that the pageflip has been completed.
359  */
360 static void dm_pflip_high_irq(void *interrupt_params)
361 {
362 	struct amdgpu_crtc *amdgpu_crtc;
363 	struct common_irq_params *irq_params = interrupt_params;
364 	struct amdgpu_device *adev = irq_params->adev;
365 	unsigned long flags;
366 	struct drm_pending_vblank_event *e;
367 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
368 	bool vrr_active;
369 
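	/* Pageflip IRQ sources are laid out per OTG, so the offset from
	 * IRQ_TYPE_PFLIP recovers the OTG instance (and thus the CRTC).
	 */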
370 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
371 
372 	/* IRQ could occur when in initial stage */
373 	/* TODO work and BO cleanup */
374 	if (amdgpu_crtc == NULL) {
375 		DC_LOG_PFLIP("CRTC is null, returning.\n");
376 		return;
377 	}
378 
379 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
380 
381 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
382 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
383 						 amdgpu_crtc->pflip_status,
384 						 AMDGPU_FLIP_SUBMITTED,
385 						 amdgpu_crtc->crtc_id,
386 						 amdgpu_crtc);
387 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
388 		return;
389 	}
390 
391 	/* page flip completed. */
392 	e = amdgpu_crtc->event;
393 	amdgpu_crtc->event = NULL;
394 
395 	WARN_ON(!e);
397 
398 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
399 
400 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
401 	if (!vrr_active ||
402 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
403 				      &v_blank_end, &hpos, &vpos) ||
404 	    (vpos < v_blank_start)) {
405 		/* Update to correct count and vblank timestamp if racing with
406 		 * vblank irq. This also updates to the correct vblank timestamp
407 		 * even in VRR mode, as scanout is past the front-porch atm.
408 		 */
409 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
410 
411 		/* Wake up userspace by sending the pageflip event with proper
412 		 * count and timestamp of vblank of flip completion.
413 		 */
414 		if (e) {
415 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
416 
417 			/* Event sent, so done with vblank for this flip */
418 			drm_crtc_vblank_put(&amdgpu_crtc->base);
419 		}
420 	} else if (e) {
421 		/* VRR active and inside front-porch: vblank count and
422 		 * timestamp for pageflip event will only be up to date after
423 		 * drm_crtc_handle_vblank() has been executed from late vblank
424 		 * irq handler after start of back-porch (vline 0). We queue the
425 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
426 		 * updated timestamp and count, once it runs after us.
427 		 *
428 		 * We need to open-code this instead of using the helper
429 		 * drm_crtc_arm_vblank_event(), as that helper would
430 		 * call drm_crtc_accurate_vblank_count(), which we must
431 		 * not call in VRR mode while we are in front-porch!
432 		 */
433 
434 		/* sequence will be replaced by real count during send-out. */
435 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
436 		e->pipe = amdgpu_crtc->crtc_id;
437 
438 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
439 		e = NULL;
440 	}
441 
442 	/* Keep track of vblank of this flip for flip throttling. We use the
443 	 * cooked hw counter, as it is incremented at the start of this vblank
444 	 * of pageflip completion, so last_flip_vblank is the forbidden count
445 	 * for queueing new pageflips if vsync + VRR is enabled.
446 	 */
447 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
448 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
449 
450 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
451 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
452 
453 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
454 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
455 		     vrr_active, (int) !e);
456 }
457 
458 static void dm_vupdate_high_irq(void *interrupt_params)
459 {
460 	struct common_irq_params *irq_params = interrupt_params;
461 	struct amdgpu_device *adev = irq_params->adev;
462 	struct amdgpu_crtc *acrtc;
463 	struct drm_device *drm_dev;
464 	struct drm_vblank_crtc *vblank;
465 	ktime_t frame_duration_ns, previous_timestamp;
466 	unsigned long flags;
467 	int vrr_active;
468 
469 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
470 
471 	if (acrtc) {
472 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
473 		drm_dev = acrtc->base.dev;
474 		vblank = &drm_dev->vblank[acrtc->base.index];
475 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
476 		frame_duration_ns = vblank->time - previous_timestamp;
477 
478 		if (frame_duration_ns > 0) {
479 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
480 						frame_duration_ns,
481 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
482 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
483 		}
484 
485 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
486 			      acrtc->crtc_id,
487 			      vrr_active);
488 
489 		/* Core vblank handling is done here, after the end of front-porch
490 		 * in vrr mode, as vblank timestamping only gives valid results
491 		 * now that we are past the front-porch. This will also deliver
492 		 * page-flip completion events that have been queued to us
493 		 * if a pageflip happened inside front-porch.
494 		 */
495 		if (vrr_active) {
496 			drm_crtc_handle_vblank(&acrtc->base);
497 
498 			/* BTR processing for pre-DCE12 ASICs */
499 			if (acrtc->dm_irq_params.stream &&
500 			    adev->family < AMDGPU_FAMILY_AI) {
501 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
502 				mod_freesync_handle_v_update(
503 				    adev->dm.freesync_module,
504 				    acrtc->dm_irq_params.stream,
505 				    &acrtc->dm_irq_params.vrr_params);
506 
507 				dc_stream_adjust_vmin_vmax(
508 				    adev->dm.dc,
509 				    acrtc->dm_irq_params.stream,
510 				    &acrtc->dm_irq_params.vrr_params.adjust);
511 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
512 			}
513 		}
514 	}
515 }
516 
517 /**
518  * dm_crtc_high_irq() - Handles CRTC interrupt
519  * @interrupt_params: used for determining the CRTC instance
520  *
521  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
522  * event handler.
523  */
524 static void dm_crtc_high_irq(void *interrupt_params)
525 {
526 	struct common_irq_params *irq_params = interrupt_params;
527 	struct amdgpu_device *adev = irq_params->adev;
528 	struct amdgpu_crtc *acrtc;
529 	unsigned long flags;
530 	int vrr_active;
531 
532 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
533 	if (!acrtc)
534 		return;
535 
536 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
537 
538 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
539 		      vrr_active, acrtc->dm_irq_params.active_planes);
540 
541 	/**
542 	 * Core vblank handling at start of front-porch is only possible
543 	 * in non-vrr mode, as only then does vblank timestamping give
544 	 * valid results while inside the front-porch. Otherwise defer it
545 	 * to dm_vupdate_high_irq after end of front-porch.
546 	 */
547 	if (!vrr_active)
548 		drm_crtc_handle_vblank(&acrtc->base);
549 
550 	/**
551 	 * The following must happen at start of vblank, for crc
552 	 * computation and below-the-range btr support in vrr mode.
553 	 */
554 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
555 
556 	/* BTR updates need to happen before VUPDATE on Vega and above. */
557 	if (adev->family < AMDGPU_FAMILY_AI)
558 		return;
559 
560 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
561 
562 	if (acrtc->dm_irq_params.stream &&
563 	    acrtc->dm_irq_params.vrr_params.supported &&
564 	    acrtc->dm_irq_params.freesync_config.state ==
565 		    VRR_STATE_ACTIVE_VARIABLE) {
566 		mod_freesync_handle_v_update(adev->dm.freesync_module,
567 					     acrtc->dm_irq_params.stream,
568 					     &acrtc->dm_irq_params.vrr_params);
569 
570 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
571 					   &acrtc->dm_irq_params.vrr_params.adjust);
572 	}
573 
574 	/*
575 	 * If there aren't any active_planes then DCN HUBP may be clock-gated.
576 	 * In that case, pageflip completion interrupts won't fire and pageflip
577 	 * completion events won't get delivered. Prevent this by sending
578 	 * pending pageflip events from here if a flip is still pending.
579 	 *
580 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
581 	 * avoid race conditions between flip programming and completion,
582 	 * which could cause too early flip completion events.
583 	 */
584 	if (adev->family >= AMDGPU_FAMILY_RV &&
585 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
586 	    acrtc->dm_irq_params.active_planes == 0) {
587 		if (acrtc->event) {
588 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
589 			acrtc->event = NULL;
590 			drm_crtc_vblank_put(&acrtc->base);
591 		}
592 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
593 	}
594 
595 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
596 }
597 
598 #if defined(CONFIG_DRM_AMD_DC_DCN)
599 /**
600  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601  * DCN generation ASICs
602  * @interrupt_params: interrupt parameters
603  *
604  * Used to set the CRC window and read out the CRC value at the vertical line 0 position
605  */
606 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
607 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
608 {
609 	struct common_irq_params *irq_params = interrupt_params;
610 	struct amdgpu_device *adev = irq_params->adev;
611 	struct amdgpu_crtc *acrtc;
612 
613 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
614 
615 	if (!acrtc)
616 		return;
617 
618 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
619 }
620 #endif
621 #endif
622 
623 static int dm_set_clockgating_state(void *handle,
624 		  enum amd_clockgating_state state)
625 {
626 	return 0;
627 }
628 
629 static int dm_set_powergating_state(void *handle,
630 		  enum amd_powergating_state state)
631 {
632 	return 0;
633 }
634 
635 /* Prototypes of private functions */
636 static int dm_early_init(void *handle);
637 
638 /* Allocate memory for FBC compressed data */
639 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
640 {
641 	struct drm_device *dev = connector->dev;
642 	struct amdgpu_device *adev = drm_to_adev(dev);
643 	struct dm_compressor_info *compressor = &adev->dm.compressor;
644 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
645 	struct drm_display_mode *mode;
646 	unsigned long max_size = 0;
647 
648 	if (adev->dm.dc->fbc_compressor == NULL)
649 		return;
650 
651 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
652 		return;
653 
654 	if (compressor->bo_ptr)
655 		return;
656 
658 	list_for_each_entry(mode, &connector->modes, head) {
659 		if (max_size < mode->htotal * mode->vtotal)
660 			max_size = mode->htotal * mode->vtotal;
661 	}
662 
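	/* Size the buffer for the largest listed mode, assuming 4 bytes per
	 * pixel (hence the max_size * 4 below).
	 */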
663 	if (max_size) {
664 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
665 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
666 			    &compressor->gpu_addr, &compressor->cpu_addr);
667 
668 		if (r)
669 			DRM_ERROR("DM: Failed to initialize FBC\n");
670 		else {
671 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
672 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
673 		}
674 
675 	}
676 
677 }
678 
679 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
680 					  int pipe, bool *enabled,
681 					  unsigned char *buf, int max_bytes)
682 {
683 	struct drm_device *dev = dev_get_drvdata(kdev);
684 	struct amdgpu_device *adev = drm_to_adev(dev);
685 	struct drm_connector *connector;
686 	struct drm_connector_list_iter conn_iter;
687 	struct amdgpu_dm_connector *aconnector;
688 	int ret = 0;
689 
690 	*enabled = false;
691 
692 	mutex_lock(&adev->dm.audio_lock);
693 
694 	drm_connector_list_iter_begin(dev, &conn_iter);
695 	drm_for_each_connector_iter(connector, &conn_iter) {
696 		aconnector = to_amdgpu_dm_connector(connector);
697 		if (aconnector->audio_inst != port)
698 			continue;
699 
700 		*enabled = true;
701 		ret = drm_eld_size(connector->eld);
702 		memcpy(buf, connector->eld, min(max_bytes, ret));
703 
704 		break;
705 	}
706 	drm_connector_list_iter_end(&conn_iter);
707 
708 	mutex_unlock(&adev->dm.audio_lock);
709 
710 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
711 
712 	return ret;
713 }
714 
715 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
716 	.get_eld = amdgpu_dm_audio_component_get_eld,
717 };
718 
719 static int amdgpu_dm_audio_component_bind(struct device *kdev,
720 				       struct device *hda_kdev, void *data)
721 {
722 	struct drm_device *dev = dev_get_drvdata(kdev);
723 	struct amdgpu_device *adev = drm_to_adev(dev);
724 	struct drm_audio_component *acomp = data;
725 
726 	acomp->ops = &amdgpu_dm_audio_component_ops;
727 	acomp->dev = kdev;
728 	adev->dm.audio_component = acomp;
729 
730 	return 0;
731 }
732 
733 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
734 					  struct device *hda_kdev, void *data)
735 {
736 	struct drm_device *dev = dev_get_drvdata(kdev);
737 	struct amdgpu_device *adev = drm_to_adev(dev);
738 	struct drm_audio_component *acomp = data;
739 
740 	acomp->ops = NULL;
741 	acomp->dev = NULL;
742 	adev->dm.audio_component = NULL;
743 }
744 
745 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
746 	.bind	= amdgpu_dm_audio_component_bind,
747 	.unbind	= amdgpu_dm_audio_component_unbind,
748 };
749 
750 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
751 {
752 	int i, ret;
753 
754 	if (!amdgpu_audio)
755 		return 0;
756 
757 	adev->mode_info.audio.enabled = true;
758 
759 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
760 
761 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
762 		adev->mode_info.audio.pin[i].channels = -1;
763 		adev->mode_info.audio.pin[i].rate = -1;
764 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
765 		adev->mode_info.audio.pin[i].status_bits = 0;
766 		adev->mode_info.audio.pin[i].category_code = 0;
767 		adev->mode_info.audio.pin[i].connected = false;
768 		adev->mode_info.audio.pin[i].id =
769 			adev->dm.dc->res_pool->audios[i]->inst;
770 		adev->mode_info.audio.pin[i].offset = 0;
771 	}
772 
773 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
774 	if (ret < 0)
775 		return ret;
776 
777 	adev->dm.audio_registered = true;
778 
779 	return 0;
780 }
781 
782 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
783 {
784 	if (!amdgpu_audio)
785 		return;
786 
787 	if (!adev->mode_info.audio.enabled)
788 		return;
789 
790 	if (adev->dm.audio_registered) {
791 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
792 		adev->dm.audio_registered = false;
793 	}
794 
795 	/* TODO: Disable audio? */
796 
797 	adev->mode_info.audio.enabled = false;
798 }
799 
800 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
801 {
802 	struct drm_audio_component *acomp = adev->dm.audio_component;
803 
804 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
805 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
806 
807 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
808 						 pin, -1);
809 	}
810 }
811 
812 static int dm_dmub_hw_init(struct amdgpu_device *adev)
813 {
814 	const struct dmcub_firmware_header_v1_0 *hdr;
815 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
816 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
817 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
818 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
819 	struct abm *abm = adev->dm.dc->res_pool->abm;
820 	struct dmub_srv_hw_params hw_params;
821 	enum dmub_status status;
822 	const unsigned char *fw_inst_const, *fw_bss_data;
823 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
824 	bool has_hw_support;
825 
826 	if (!dmub_srv)
827 		/* DMUB isn't supported on the ASIC. */
828 		return 0;
829 
830 	if (!fb_info) {
831 		DRM_ERROR("No framebuffer info for DMUB service.\n");
832 		return -EINVAL;
833 	}
834 
835 	if (!dmub_fw) {
836 		/* Firmware required for DMUB support. */
837 		DRM_ERROR("No firmware provided for DMUB.\n");
838 		return -EINVAL;
839 	}
840 
841 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
842 	if (status != DMUB_STATUS_OK) {
843 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
844 		return -EINVAL;
845 	}
846 
847 	if (!has_hw_support) {
848 		DRM_INFO("DMUB unsupported on ASIC\n");
849 		return 0;
850 	}
851 
852 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
853 
854 	fw_inst_const = dmub_fw->data +
855 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
856 			PSP_HEADER_BYTES;
857 
858 	fw_bss_data = dmub_fw->data +
859 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
860 		      le32_to_cpu(hdr->inst_const_bytes);
861 
862 	/* Copy firmware and bios info into FB memory. */
863 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
864 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
865 
866 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
867 
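	/*
	 * Layout of the DMCUB image at ucode_array_offset_bytes, as implied
	 * by the offsets computed above:
	 *
	 *   | PSP header | fw_inst_const payload | PSP footer | fw_bss_data |
	 *   |<------------- inst_const_bytes ---------------->|
	 */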
868 	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
869 	 * amdgpu_ucode_init_single_fw will load the dmub firmware's
870 	 * fw_inst_const part into CW0; otherwise, the firmware backdoor
871 	 * load is done here in dm_dmub_hw_init.
872 	 */
873 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
874 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
875 				fw_inst_const_size);
876 	}
877 
878 	if (fw_bss_data_size)
879 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
880 		       fw_bss_data, fw_bss_data_size);
881 
882 	/* Copy firmware bios info into FB memory. */
883 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
884 	       adev->bios_size);
885 
886 	/* Reset regions that need to be reset. */
887 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
888 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
889 
890 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
891 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
892 
893 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
894 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
895 
896 	/* Initialize hardware. */
897 	memset(&hw_params, 0, sizeof(hw_params));
898 	hw_params.fb_base = adev->gmc.fb_start;
899 	hw_params.fb_offset = adev->gmc.aper_base;
900 
901 	/* backdoor load firmware and trigger dmub running */
902 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
903 		hw_params.load_inst_const = true;
904 
905 	if (dmcu)
906 		hw_params.psp_version = dmcu->psp_version;
907 
908 	for (i = 0; i < fb_info->num_fb; ++i)
909 		hw_params.fb[i] = &fb_info->fb[i];
910 
911 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
912 	if (status != DMUB_STATUS_OK) {
913 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
914 		return -EINVAL;
915 	}
916 
917 	/* Wait for firmware load to finish. */
918 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
919 	if (status != DMUB_STATUS_OK)
920 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
921 
922 	/* Init DMCU and ABM if available. */
923 	if (dmcu && abm) {
924 		dmcu->funcs->dmcu_init(dmcu);
925 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
926 	}
927 
928 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
929 	if (!adev->dm.dc->ctx->dmub_srv) {
930 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
931 		return -ENOMEM;
932 	}
933 
934 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
935 		 adev->dm.dmcub_fw_version);
936 
937 	return 0;
938 }
939 
940 #if defined(CONFIG_DRM_AMD_DC_DCN)
941 #define DMUB_TRACE_MAX_READ 64
942 static void dm_dmub_trace_high_irq(void *interrupt_params)
943 {
944 	struct common_irq_params *irq_params = interrupt_params;
945 	struct amdgpu_device *adev = irq_params->adev;
946 	struct amdgpu_display_manager *dm = &adev->dm;
947 	struct dmcub_trace_buf_entry entry = { 0 };
948 	uint32_t count = 0;
949 
950 	do {
951 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
952 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
953 							entry.param0, entry.param1);
954 
955 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
956 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
957 		} else
958 			break;
959 
960 		count++;
961 
962 	} while (count <= DMUB_TRACE_MAX_READ);
963 
964 	ASSERT(count <= DMUB_TRACE_MAX_READ);
965 }
966 
967 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
968 {
969 	uint64_t pt_base;
970 	uint32_t logical_addr_low;
971 	uint32_t logical_addr_high;
972 	uint32_t agp_base, agp_bot, agp_top;
973 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
974 
975 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
976 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
977 
978 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
979 		/*
980 		 * Raven2 has a HW issue that prevents it from using vram above
981 		 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, raise the
982 		 * system aperture high address (add 1) to avoid the VM fault
983 		 * and hardware hang.
984 		 */
985 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
986 	else
987 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
988 
989 	agp_base = 0;
990 	agp_bot = adev->gmc.agp_start >> 24;
991 	agp_top = adev->gmc.agp_end >> 24;
992 
994 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
995 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
996 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
997 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
998 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
999 	page_table_base.low_part = lower_32_bits(pt_base);
1000 
1001 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1002 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1003 
1004 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1005 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1006 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1007 
1008 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1009 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1010 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1011 
1012 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1013 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1014 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1015 
1016 	pa_config->is_hvm_enabled = 0;
1017 
1018 }
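/*
 * Worked example of the encoding above (illustrative numbers): an
 * fb_start of 0x8000000000 becomes 0x8000000000 >> 18 = 0x200000 in
 * 256KB units, and system_aperture.start_addr reconstructs it as
 * 0x200000 << 18. The AGP window uses 16MB units (<< 24) the same way.
 */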
1019 #endif
1020 #if defined(CONFIG_DRM_AMD_DC_DCN)
1021 static void event_mall_stutter(struct work_struct *work)
1022 {
1024 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1025 	struct amdgpu_display_manager *dm = vblank_work->dm;
1026 
1027 	mutex_lock(&dm->dc_lock);
1028 
1029 	if (vblank_work->enable)
1030 		dm->active_vblank_irq_count++;
1031 	else if (dm->active_vblank_irq_count)
1032 		dm->active_vblank_irq_count--;
1033 
1034 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1035 
1036 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1037 
1038 	mutex_unlock(&dm->dc_lock);
1039 }
1040 
1041 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1042 {
1044 	int max_caps = dc->caps.max_links;
1045 	struct vblank_workqueue *vblank_work;
1046 	int i = 0;
1047 
1048 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1049 	if (ZERO_OR_NULL_PTR(vblank_work)) {
1050 		kfree(vblank_work);
1051 		return NULL;
1052 	}
1053 
1054 	for (i = 0; i < max_caps; i++)
1055 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1056 
1057 	return vblank_work;
1058 }
1059 #endif
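/*
 * A minimal usage sketch for the per-link work items above (illustrative;
 * the real enqueue is done from the CRTC vblank enable/disable path):
 *
 *	work->dm = dm;
 *	work->enable = enable;
 *	schedule_work(&work->mall_work);
 *
 * event_mall_stutter() then adjusts active_vblank_irq_count under dc_lock
 * and allows idle optimizations only when the count drops to zero.
 */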
1060 static int amdgpu_dm_init(struct amdgpu_device *adev)
1061 {
1062 	struct dc_init_data init_data;
1063 #ifdef CONFIG_DRM_AMD_DC_HDCP
1064 	struct dc_callback_init init_params;
1065 #endif
1066 	int r;
1067 
1068 	adev->dm.ddev = adev_to_drm(adev);
1069 	adev->dm.adev = adev;
1070 
1071 	/* Zero all the fields */
1072 	memset(&init_data, 0, sizeof(init_data));
1073 #ifdef CONFIG_DRM_AMD_DC_HDCP
1074 	memset(&init_params, 0, sizeof(init_params));
1075 #endif
1076 
1077 	mutex_init(&adev->dm.dc_lock);
1078 	mutex_init(&adev->dm.audio_lock);
1079 #if defined(CONFIG_DRM_AMD_DC_DCN)
1080 	spin_lock_init(&adev->dm.vblank_lock);
1081 #endif
1082 
1083 	if (amdgpu_dm_irq_init(adev)) {
1084 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1085 		goto error;
1086 	}
1087 
1088 	init_data.asic_id.chip_family = adev->family;
1089 
1090 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1091 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1092 
1093 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1094 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1095 	init_data.asic_id.atombios_base_address =
1096 		adev->mode_info.atom_context->bios;
1097 
1098 	init_data.driver = adev;
1099 
1100 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1101 
1102 	if (!adev->dm.cgs_device) {
1103 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1104 		goto error;
1105 	}
1106 
1107 	init_data.cgs_device = adev->dm.cgs_device;
1108 
1109 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1110 
1111 	switch (adev->asic_type) {
1112 	case CHIP_CARRIZO:
1113 	case CHIP_STONEY:
1114 	case CHIP_RAVEN:
1115 	case CHIP_RENOIR:
1116 		init_data.flags.gpu_vm_support = true;
1117 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1118 			init_data.flags.disable_dmcu = true;
1119 		break;
1120 #if defined(CONFIG_DRM_AMD_DC_DCN)
1121 	case CHIP_VANGOGH:
1122 		init_data.flags.gpu_vm_support = true;
1123 		break;
1124 #endif
1125 	default:
1126 		break;
1127 	}
1128 
1129 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1130 		init_data.flags.fbc_support = true;
1131 
1132 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1133 		init_data.flags.multi_mon_pp_mclk_switch = true;
1134 
1135 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1136 		init_data.flags.disable_fractional_pwm = true;
1137 
1138 	init_data.flags.power_down_display_on_boot = true;
1139 
1140 	INIT_LIST_HEAD(&adev->dm.da_list);
1141 	/* Display Core create. */
1142 	adev->dm.dc = dc_create(&init_data);
1143 
1144 	if (adev->dm.dc) {
1145 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1146 	} else {
1147 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1148 		goto error;
1149 	}
1150 
1151 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1152 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1153 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1154 	}
1155 
1156 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1157 		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1158 
1159 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1160 		adev->dm.dc->debug.disable_stutter = true;
1161 
1162 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1163 		adev->dm.dc->debug.disable_dsc = true;
1164 
1165 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1166 		adev->dm.dc->debug.disable_clock_gate = true;
1167 
1168 	r = dm_dmub_hw_init(adev);
1169 	if (r) {
1170 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1171 		goto error;
1172 	}
1173 
1174 	dc_hardware_init(adev->dm.dc);
1175 
1176 #if defined(CONFIG_DRM_AMD_DC_DCN)
1177 	if (adev->apu_flags) {
1178 		struct dc_phy_addr_space_config pa_config;
1179 
1180 		mmhub_read_system_context(adev, &pa_config);
1181 
1182 		// Call the DC init_memory func
1183 		dc_setup_system_context(adev->dm.dc, &pa_config);
1184 	}
1185 #endif
1186 
1187 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1188 	if (!adev->dm.freesync_module) {
1189 		DRM_ERROR(
1190 		"amdgpu: failed to initialize freesync_module.\n");
1191 	} else
1192 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1193 				adev->dm.freesync_module);
1194 
1195 	amdgpu_dm_init_color_mod();
1196 
1197 #if defined(CONFIG_DRM_AMD_DC_DCN)
1198 	if (adev->dm.dc->caps.max_links > 0) {
1199 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1200 
1201 		if (!adev->dm.vblank_workqueue)
1202 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1203 		else
1204 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1205 	}
1206 #endif
1207 
1208 #ifdef CONFIG_DRM_AMD_DC_HDCP
1209 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1210 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1211 
1212 		if (!adev->dm.hdcp_workqueue)
1213 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1214 		else
1215 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1216 
1217 		dc_init_callbacks(adev->dm.dc, &init_params);
1218 	}
1219 #endif
1220 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1221 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1222 #endif
1223 	if (amdgpu_dm_initialize_drm_device(adev)) {
1224 		DRM_ERROR(
1225 		"amdgpu: failed to initialize sw for display support.\n");
1226 		goto error;
1227 	}
1228 
1229 	/* create fake encoders for MST */
1230 	dm_dp_create_fake_mst_encoders(adev);
1231 
1232 	/* TODO: Add_display_info? */
1233 
1234 	/* TODO use dynamic cursor width */
1235 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1236 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1237 
1238 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1239 		DRM_ERROR(
1240 		"amdgpu: failed to initialize sw for display support.\n");
1241 		goto error;
1242 	}
1243 
1245 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1246 
1247 	return 0;
1248 error:
1249 	amdgpu_dm_fini(adev);
1250 
1251 	return -EINVAL;
1252 }
1253 
1254 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1255 {
1256 	int i;
1257 
1258 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1259 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1260 	}
1261 
1262 	amdgpu_dm_audio_fini(adev);
1263 
1264 	amdgpu_dm_destroy_drm_device(&adev->dm);
1265 
1266 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1267 	if (adev->dm.crc_rd_wrk) {
1268 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1269 		kfree(adev->dm.crc_rd_wrk);
1270 		adev->dm.crc_rd_wrk = NULL;
1271 	}
1272 #endif
1273 #ifdef CONFIG_DRM_AMD_DC_HDCP
1274 	if (adev->dm.hdcp_workqueue) {
1275 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1276 		adev->dm.hdcp_workqueue = NULL;
1277 	}
1278 
1279 	if (adev->dm.dc)
1280 		dc_deinit_callbacks(adev->dm.dc);
1281 #endif
1282 
1283 #if defined(CONFIG_DRM_AMD_DC_DCN)
1284 	if (adev->dm.vblank_workqueue) {
1285 		adev->dm.vblank_workqueue->dm = NULL;
1286 		kfree(adev->dm.vblank_workqueue);
1287 		adev->dm.vblank_workqueue = NULL;
1288 	}
1289 #endif
1290 
1291 	if (adev->dm.dc->ctx->dmub_srv) {
1292 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1293 		adev->dm.dc->ctx->dmub_srv = NULL;
1294 	}
1295 
1296 	if (adev->dm.dmub_bo)
1297 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1298 				      &adev->dm.dmub_bo_gpu_addr,
1299 				      &adev->dm.dmub_bo_cpu_addr);
1300 
1301 	/* DC Destroy TODO: Replace destroy DAL */
1302 	if (adev->dm.dc)
1303 		dc_destroy(&adev->dm.dc);
1304 	/*
1305 	 * TODO: pageflip, vblank interrupt
1306 	 *
1307 	 * amdgpu_dm_irq_fini(adev);
1308 	 */
1309 
1310 	if (adev->dm.cgs_device) {
1311 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1312 		adev->dm.cgs_device = NULL;
1313 	}
1314 	if (adev->dm.freesync_module) {
1315 		mod_freesync_destroy(adev->dm.freesync_module);
1316 		adev->dm.freesync_module = NULL;
1317 	}
1318 
1319 	mutex_destroy(&adev->dm.audio_lock);
1320 	mutex_destroy(&adev->dm.dc_lock);
1321 
1322 	return;
1323 }
1324 
1325 static int load_dmcu_fw(struct amdgpu_device *adev)
1326 {
1327 	const char *fw_name_dmcu = NULL;
1328 	int r;
1329 	const struct dmcu_firmware_header_v1_0 *hdr;
1330 
1331 	switch (adev->asic_type) {
1332 #if defined(CONFIG_DRM_AMD_DC_SI)
1333 	case CHIP_TAHITI:
1334 	case CHIP_PITCAIRN:
1335 	case CHIP_VERDE:
1336 	case CHIP_OLAND:
1337 #endif
1338 	case CHIP_BONAIRE:
1339 	case CHIP_HAWAII:
1340 	case CHIP_KAVERI:
1341 	case CHIP_KABINI:
1342 	case CHIP_MULLINS:
1343 	case CHIP_TONGA:
1344 	case CHIP_FIJI:
1345 	case CHIP_CARRIZO:
1346 	case CHIP_STONEY:
1347 	case CHIP_POLARIS11:
1348 	case CHIP_POLARIS10:
1349 	case CHIP_POLARIS12:
1350 	case CHIP_VEGAM:
1351 	case CHIP_VEGA10:
1352 	case CHIP_VEGA12:
1353 	case CHIP_VEGA20:
1354 	case CHIP_NAVI10:
1355 	case CHIP_NAVI14:
1356 	case CHIP_RENOIR:
1357 	case CHIP_SIENNA_CICHLID:
1358 	case CHIP_NAVY_FLOUNDER:
1359 	case CHIP_DIMGREY_CAVEFISH:
1360 	case CHIP_VANGOGH:
1361 		return 0;
1362 	case CHIP_NAVI12:
1363 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1364 		break;
1365 	case CHIP_RAVEN:
1366 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1367 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1368 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1369 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1370 		else
1371 			return 0;
1372 		break;
1373 	default:
1374 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1375 		return -EINVAL;
1376 	}
1377 
1378 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1379 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1380 		return 0;
1381 	}
1382 
1383 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1384 	if (r == -ENOENT) {
1385 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1386 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1387 		adev->dm.fw_dmcu = NULL;
1388 		return 0;
1389 	}
1390 	if (r) {
1391 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1392 			fw_name_dmcu);
1393 		return r;
1394 	}
1395 
1396 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1397 	if (r) {
1398 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1399 			fw_name_dmcu);
1400 		release_firmware(adev->dm.fw_dmcu);
1401 		adev->dm.fw_dmcu = NULL;
1402 		return r;
1403 	}
1404 
1405 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1406 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1407 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1408 	adev->firmware.fw_size +=
1409 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1410 
1411 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1412 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1413 	adev->firmware.fw_size +=
1414 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
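	/* Both ucode table entries above point into the same DMCU blob:
	 * the ERAM payload (ucode_size_bytes - intv_size_bytes) and the
	 * interrupt vectors (intv_size_bytes).
	 */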
1415 
1416 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1417 
1418 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1419 
1420 	return 0;
1421 }
1422 
1423 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1424 {
1425 	struct amdgpu_device *adev = ctx;
1426 
1427 	return dm_read_reg(adev->dm.dc->ctx, address);
1428 }
1429 
1430 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1431 				     uint32_t value)
1432 {
1433 	struct amdgpu_device *adev = ctx;
1434 
1435 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1436 }
1437 
1438 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1439 {
1440 	struct dmub_srv_create_params create_params;
1441 	struct dmub_srv_region_params region_params;
1442 	struct dmub_srv_region_info region_info;
1443 	struct dmub_srv_fb_params fb_params;
1444 	struct dmub_srv_fb_info *fb_info;
1445 	struct dmub_srv *dmub_srv;
1446 	const struct dmcub_firmware_header_v1_0 *hdr;
1447 	const char *fw_name_dmub;
1448 	enum dmub_asic dmub_asic;
1449 	enum dmub_status status;
1450 	int r;
1451 
1452 	switch (adev->asic_type) {
1453 	case CHIP_RENOIR:
1454 		dmub_asic = DMUB_ASIC_DCN21;
1455 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1456 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1457 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1458 		break;
1459 	case CHIP_SIENNA_CICHLID:
1460 		dmub_asic = DMUB_ASIC_DCN30;
1461 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1462 		break;
1463 	case CHIP_NAVY_FLOUNDER:
1464 		dmub_asic = DMUB_ASIC_DCN30;
1465 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1466 		break;
1467 	case CHIP_VANGOGH:
1468 		dmub_asic = DMUB_ASIC_DCN301;
1469 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1470 		break;
1471 	case CHIP_DIMGREY_CAVEFISH:
1472 		dmub_asic = DMUB_ASIC_DCN302;
1473 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1474 		break;
1475 
1476 	default:
1477 		/* ASIC doesn't support DMUB. */
1478 		return 0;
1479 	}
1480 
1481 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1482 	if (r) {
1483 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1484 		return 0;
1485 	}
1486 
1487 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1488 	if (r) {
1489 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1490 		return 0;
1491 	}
1492 
1493 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1494 
1495 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1496 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1497 			AMDGPU_UCODE_ID_DMCUB;
1498 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1499 			adev->dm.dmub_fw;
1500 		adev->firmware.fw_size +=
1501 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1502 
1503 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1504 			 le32_to_cpu(hdr->header.ucode_version));
1505 	}
1506 
1507 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1508 
1509 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1510 	dmub_srv = adev->dm.dmub_srv;
1511 
1512 	if (!dmub_srv) {
1513 		DRM_ERROR("Failed to allocate DMUB service!\n");
1514 		return -ENOMEM;
1515 	}
1516 
1517 	memset(&create_params, 0, sizeof(create_params));
1518 	create_params.user_ctx = adev;
1519 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1520 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1521 	create_params.asic = dmub_asic;
1522 
1523 	/* Create the DMUB service. */
1524 	status = dmub_srv_create(dmub_srv, &create_params);
1525 	if (status != DMUB_STATUS_OK) {
1526 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1527 		return -EINVAL;
1528 	}
1529 
1530 	/* Calculate the size of all the regions for the DMUB service. */
1531 	memset(&region_params, 0, sizeof(region_params));
1532 
1533 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1534 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1535 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1536 	region_params.vbios_size = adev->bios_size;
1537 	region_params.fw_bss_data = region_params.bss_data_size ?
1538 		adev->dm.dmub_fw->data +
1539 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1540 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1541 	region_params.fw_inst_const =
1542 		adev->dm.dmub_fw->data +
1543 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1544 		PSP_HEADER_BYTES;
1545 
1546 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1547 					   &region_info);
1548 
1549 	if (status != DMUB_STATUS_OK) {
1550 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1551 		return -EINVAL;
1552 	}
1553 
1554 	/*
1555 	 * Allocate a framebuffer based on the total size of all the regions.
1556 	 * TODO: Move this into GART.
1557 	 */
1558 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1559 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1560 				    &adev->dm.dmub_bo_gpu_addr,
1561 				    &adev->dm.dmub_bo_cpu_addr);
1562 	if (r)
1563 		return r;
1564 
1565 	/* Rebase the regions on the framebuffer address. */
1566 	memset(&fb_params, 0, sizeof(fb_params));
1567 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1568 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1569 	fb_params.region_info = &region_info;
1570 
1571 	adev->dm.dmub_fb_info =
1572 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1573 	fb_info = adev->dm.dmub_fb_info;
1574 
1575 	if (!fb_info) {
1576 		DRM_ERROR(
1577 			"Failed to allocate framebuffer info for DMUB service!\n");
1578 		return -ENOMEM;
1579 	}
1580 
1581 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1582 	if (status != DMUB_STATUS_OK) {
1583 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1584 		return -EINVAL;
1585 	}
1586 
1587 	return 0;
1588 }
1589 
1590 static int dm_sw_init(void *handle)
1591 {
1592 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1593 	int r;
1594 
1595 	r = dm_dmub_sw_init(adev);
1596 	if (r)
1597 		return r;
1598 
1599 	return load_dmcu_fw(adev);
1600 }
1601 
1602 static int dm_sw_fini(void *handle)
1603 {
1604 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1605 
1606 	kfree(adev->dm.dmub_fb_info);
1607 	adev->dm.dmub_fb_info = NULL;
1608 
1609 	if (adev->dm.dmub_srv) {
1610 		dmub_srv_destroy(adev->dm.dmub_srv);
1611 		adev->dm.dmub_srv = NULL;
1612 	}
1613 
1614 	release_firmware(adev->dm.dmub_fw);
1615 	adev->dm.dmub_fw = NULL;
1616 
1617 	release_firmware(adev->dm.fw_dmcu);
1618 	adev->dm.fw_dmcu = NULL;
1619 
1620 	return 0;
1621 }
1622 
1623 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1624 {
1625 	struct amdgpu_dm_connector *aconnector;
1626 	struct drm_connector *connector;
1627 	struct drm_connector_list_iter iter;
1628 	int ret = 0;
1629 
1630 	drm_connector_list_iter_begin(dev, &iter);
1631 	drm_for_each_connector_iter(connector, &iter) {
1632 		aconnector = to_amdgpu_dm_connector(connector);
1633 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1634 		    aconnector->mst_mgr.aux) {
1635 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1636 					 aconnector,
1637 					 aconnector->base.base.id);
1638 
1639 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1640 			if (ret < 0) {
1641 				DRM_ERROR("DM_MST: Failed to start MST\n");
1642 				aconnector->dc_link->type =
1643 					dc_connection_single;
1644 				break;
1645 			}
1646 		}
1647 	}
1648 	drm_connector_list_iter_end(&iter);
1649 
1650 	return ret;
1651 }
1652 
1653 static int dm_late_init(void *handle)
1654 {
1655 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1656 
1657 	struct dmcu_iram_parameters params;
1658 	unsigned int linear_lut[16];
1659 	int i;
1660 	struct dmcu *dmcu = NULL;
1661 	bool ret = true;
1662 
1663 	dmcu = adev->dm.dc->res_pool->dmcu;
1664 
1665 	for (i = 0; i < 16; i++)
1666 		linear_lut[i] = 0xFFFF * i / 15;
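	/* i.e. a linear ramp: 0x0000, 0x1111, 0x2222, ..., 0xFFFF */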
1667 
1668 	params.set = 0;
1669 	params.backlight_ramping_start = 0xCCCC;
1670 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1671 	params.backlight_lut_array_size = 16;
1672 	params.backlight_lut_array = linear_lut;
1673 
1674 	/* Min backlight level after ABM reduction; don't allow below 1%:
1675 	 * 0xFFFF * 0.01 = 0x28F
1676 	 */
1677 	params.min_abm_backlight = 0x28F;
1678 
1679 	/* In the case where abm is implemented on dmcub,
1680 	 * the dmcu object will be NULL.
1681 	 * ABM 2.4 and up are implemented on dmcub.
1682 	 */
1683 	if (dmcu)
1684 		ret = dmcu_load_iram(dmcu, params);
1685 	else if (adev->dm.dc->ctx->dmub_srv)
1686 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1687 
1688 	if (!ret)
1689 		return -EINVAL;
1690 
1691 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1692 }
1693 
1694 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1695 {
1696 	struct amdgpu_dm_connector *aconnector;
1697 	struct drm_connector *connector;
1698 	struct drm_connector_list_iter iter;
1699 	struct drm_dp_mst_topology_mgr *mgr;
1700 	int ret;
1701 	bool need_hotplug = false;
1702 
1703 	drm_connector_list_iter_begin(dev, &iter);
1704 	drm_for_each_connector_iter(connector, &iter) {
1705 		aconnector = to_amdgpu_dm_connector(connector);
1706 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1707 		    aconnector->mst_port)
1708 			continue;
1709 
1710 		mgr = &aconnector->mst_mgr;
1711 
1712 		if (suspend) {
1713 			drm_dp_mst_topology_mgr_suspend(mgr);
1714 		} else {
1715 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1716 			if (ret < 0) {
1717 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1718 				need_hotplug = true;
1719 			}
1720 		}
1721 	}
1722 	drm_connector_list_iter_end(&iter);
1723 
1724 	if (need_hotplug)
1725 		drm_kms_helper_hotplug_event(dev);
1726 }
1727 
1728 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1729 {
1730 	struct smu_context *smu = &adev->smu;
1731 	int ret = 0;
1732 
1733 	if (!is_support_sw_smu(adev))
1734 		return 0;
1735 
1736 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1737 	 * depends on the Windows driver dc implementation.
1738 	 * For Navi1x, the clock settings of the dcn watermarks are fixed; the
1739 	 * settings should be passed to smu during boot up and resume from s3.
1740 	 * boot up: dc calculates the dcn watermark clock settings within
1741 	 * dc_create and dcn20_resource_construct,
1742 	 * then calls the pplib functions below to pass the settings to smu:
1743 	 * smu_set_watermarks_for_clock_ranges
1744 	 * smu_set_watermarks_table
1745 	 * navi10_set_watermarks_table
1746 	 * smu_write_watermarks_table
1747 	 *
1748 	 * For Renoir, the clock settings of the dcn watermarks are also fixed
1749 	 * values. dc has implemented a different flow for the Windows driver:
1750 	 * dc_hardware_init / dc_set_power_state
1751 	 * dcn10_init_hw
1752 	 * notify_wm_ranges
1753 	 * set_wm_ranges
1754 	 * -- Linux
1755 	 * smu_set_watermarks_for_clock_ranges
1756 	 * renoir_set_watermarks_table
1757 	 * smu_write_watermarks_table
1758 	 *
1759 	 * For Linux,
1760 	 * dc_hardware_init -> amdgpu_dm_init
1761 	 * dc_set_power_state --> dm_resume
1762 	 *
1763 	 * Therefore, this function applies to Navi10/12/14 but not to
1764 	 * Renoir.
1765 	 */
1766 	switch (adev->asic_type) {
1767 	case CHIP_NAVI10:
1768 	case CHIP_NAVI14:
1769 	case CHIP_NAVI12:
1770 		break;
1771 	default:
1772 		return 0;
1773 	}
1774 
1775 	ret = smu_write_watermarks_table(smu);
1776 	if (ret) {
1777 		DRM_ERROR("Failed to update WMTABLE!\n");
1778 		return ret;
1779 	}
1780 
1781 	return 0;
1782 }
1783 
1784 /**
1785  * dm_hw_init() - Initialize DC device
1786  * @handle: The base driver device containing the amdgpu_dm device.
1787  *
1788  * Initialize the &struct amdgpu_display_manager device. This involves calling
1789  * the initializers of each DM component, then populating the struct with them.
1790  *
1791  * Although the function implies hardware initialization, both hardware and
1792  * software are initialized here. Splitting them out to their relevant init
1793  * hooks is a future TODO item.
1794  *
1795  * Some notable things that are initialized here:
1796  *
1797  * - Display Core, both software and hardware
1798  * - DC modules that we need (freesync and color management)
1799  * - DRM software states
1800  * - Interrupt sources and handlers
1801  * - Vblank support
1802  * - Debug FS entries, if enabled
1803  */
1804 static int dm_hw_init(void *handle)
1805 {
1806 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1807 	/* Create DAL display manager */
1808 	amdgpu_dm_init(adev);
1809 	amdgpu_dm_hpd_init(adev);
1810 
1811 	return 0;
1812 }
1813 
1814 /**
1815  * dm_hw_fini() - Teardown DC device
1816  * @handle: The base driver device containing the amdgpu_dm device.
1817  *
1818  * Teardown components within &struct amdgpu_display_manager that require
1819  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1820  * were loaded. Also flush IRQ workqueues and disable them.
1821  */
1822 static int dm_hw_fini(void *handle)
1823 {
1824 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1825 
1826 	amdgpu_dm_hpd_fini(adev);
1827 
1828 	amdgpu_dm_irq_fini(adev);
1829 	amdgpu_dm_fini(adev);
1830 	return 0;
1831 }
1832 
1833 
1834 static int dm_enable_vblank(struct drm_crtc *crtc);
1835 static void dm_disable_vblank(struct drm_crtc *crtc);
1836 
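/*
 * Enable or disable the pflip and vblank interrupts for every CRTC that
 * still drives a stream with planes in @state. Used around GPU reset so
 * the interrupt state matches the cached context being saved or restored.
 */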
1837 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1838 				 struct dc_state *state, bool enable)
1839 {
1840 	enum dc_irq_source irq_source;
1841 	struct amdgpu_crtc *acrtc;
1842 	int rc = -EBUSY;
1843 	int i = 0;
1844 
1845 	for (i = 0; i < state->stream_count; i++) {
1846 		acrtc = get_crtc_by_otg_inst(
1847 				adev, state->stream_status[i].primary_otg_inst);
1848 
1849 		if (acrtc && state->stream_status[i].plane_count != 0) {
1850 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1851 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1852 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1853 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1854 			if (rc)
1855 				DRM_WARN("Failed to %s pflip interrupts\n",
1856 					 enable ? "enable" : "disable");
1857 
1858 			if (enable) {
1859 				rc = dm_enable_vblank(&acrtc->base);
1860 				if (rc)
1861 					DRM_WARN("Failed to enable vblank interrupts\n");
1862 			} else {
1863 				dm_disable_vblank(&acrtc->base);
1864 			}
1865 
1866 		}
1867 	}
1868 
1869 }
1870 
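/*
 * Commit a copy of the current context with all planes and streams
 * removed, leaving DC driving zero streams; used on suspend during GPU
 * reset, before the cached state is replayed in dm_resume().
 */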
1871 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1872 {
1873 	struct dc_state *context = NULL;
1874 	enum dc_status res = DC_ERROR_UNEXPECTED;
1875 	int i;
1876 	struct dc_stream_state *del_streams[MAX_PIPES];
1877 	int del_streams_count = 0;
1878 
1879 	memset(del_streams, 0, sizeof(del_streams));
1880 
1881 	context = dc_create_state(dc);
1882 	if (context == NULL)
1883 		goto context_alloc_fail;
1884 
1885 	dc_resource_state_copy_construct_current(dc, context);
1886 
1887 	/* First, remove all streams from the context */
1888 	for (i = 0; i < context->stream_count; i++) {
1889 		struct dc_stream_state *stream = context->streams[i];
1890 
1891 		del_streams[del_streams_count++] = stream;
1892 	}
1893 
1894 	/* Remove all planes for removed streams and then remove the streams */
1895 	for (i = 0; i < del_streams_count; i++) {
1896 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1897 			res = DC_FAIL_DETACH_SURFACES;
1898 			goto fail;
1899 		}
1900 
1901 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1902 		if (res != DC_OK)
1903 			goto fail;
1904 	}
1905 
1906 
1907 	res = dc_validate_global_state(dc, context, false);
1908 
1909 	if (res != DC_OK) {
1910 		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1911 		goto fail;
1912 	}
1913 
1914 	res = dc_commit_state(dc, context);
1915 
1916 fail:
1917 	dc_release_state(context);
1918 
1919 context_alloc_fail:
1920 	return res;
1921 }
1922 
1923 static int dm_suspend(void *handle)
1924 {
1925 	struct amdgpu_device *adev = handle;
1926 	struct amdgpu_display_manager *dm = &adev->dm;
1927 	int ret = 0;
1928 
1929 	if (amdgpu_in_reset(adev)) {
1930 		mutex_lock(&dm->dc_lock);
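		/*
		 * Note: dc_lock is intentionally held across the reset; the
		 * matching unlock is in dm_resume() once the cached state has
		 * been reapplied.
		 */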
1931 
1932 #if defined(CONFIG_DRM_AMD_DC_DCN)
1933 		dc_allow_idle_optimizations(adev->dm.dc, false);
1934 #endif
1935 
1936 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1937 
1938 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1939 
1940 		amdgpu_dm_commit_zero_streams(dm->dc);
1941 
1942 		amdgpu_dm_irq_suspend(adev);
1943 
1944 		return ret;
1945 	}
1946 
1947 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1948 	amdgpu_dm_crtc_secure_display_suspend(adev);
1949 #endif
1950 	WARN_ON(adev->dm.cached_state);
1951 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1952 
1953 	s3_handle_mst(adev_to_drm(adev), true);
1954 
1955 	amdgpu_dm_irq_suspend(adev);
1956 
1957 
1958 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1959 
1960 	return 0;
1961 }
1962 
1963 static struct amdgpu_dm_connector *
1964 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965 					     struct drm_crtc *crtc)
1966 {
1967 	uint32_t i;
1968 	struct drm_connector_state *new_con_state;
1969 	struct drm_connector *connector;
1970 	struct drm_crtc *crtc_from_state;
1971 
1972 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973 		crtc_from_state = new_con_state->crtc;
1974 
1975 		if (crtc_from_state == crtc)
1976 			return to_amdgpu_dm_connector(connector);
1977 	}
1978 
1979 	return NULL;
1980 }
1981 
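/*
 * Fake a successful detection on a forced connector with no physical sink:
 * create a sink matching the connector signal (virtual for DP) and try to
 * read a local EDID for it.
 */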
1982 static void emulated_link_detect(struct dc_link *link)
1983 {
1984 	struct dc_sink_init_data sink_init_data = { 0 };
1985 	struct display_sink_capability sink_caps = { 0 };
1986 	enum dc_edid_status edid_status;
1987 	struct dc_context *dc_ctx = link->ctx;
1988 	struct dc_sink *sink = NULL;
1989 	struct dc_sink *prev_sink = NULL;
1990 
1991 	link->type = dc_connection_none;
1992 	prev_sink = link->local_sink;
1993 
1994 	if (prev_sink)
1995 		dc_sink_release(prev_sink);
1996 
1997 	switch (link->connector_signal) {
1998 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1999 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2000 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2001 		break;
2002 	}
2003 
2004 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2005 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2006 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2007 		break;
2008 	}
2009 
2010 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2011 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2012 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2013 		break;
2014 	}
2015 
2016 	case SIGNAL_TYPE_LVDS: {
2017 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2018 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2019 		break;
2020 	}
2021 
2022 	case SIGNAL_TYPE_EDP: {
2023 		sink_caps.transaction_type =
2024 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2025 		sink_caps.signal = SIGNAL_TYPE_EDP;
2026 		break;
2027 	}
2028 
2029 	case SIGNAL_TYPE_DISPLAY_PORT: {
2030 		sink_caps.transaction_type =
2031 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2032 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2033 		break;
2034 	}
2035 
2036 	default:
2037 		DC_ERROR("Invalid connector type! signal:%d\n",
2038 			link->connector_signal);
2039 		return;
2040 	}
2041 
2042 	sink_init_data.link = link;
2043 	sink_init_data.sink_signal = sink_caps.signal;
2044 
2045 	sink = dc_sink_create(&sink_init_data);
2046 	if (!sink) {
2047 		DC_ERROR("Failed to create sink!\n");
2048 		return;
2049 	}
2050 
2051 	/* dc_sink_create returns a new reference */
2052 	link->local_sink = sink;
2053 
2054 	edid_status = dm_helpers_read_local_edid(
2055 			link->ctx,
2056 			link,
2057 			sink);
2058 
2059 	if (edid_status != EDID_OK)
2060 		DC_ERROR("Failed to read EDID\n");
2061 
2062 }
2063 
2064 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2065 				     struct amdgpu_display_manager *dm)
2066 {
2067 	struct {
2068 		struct dc_surface_update surface_updates[MAX_SURFACES];
2069 		struct dc_plane_info plane_infos[MAX_SURFACES];
2070 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2071 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2072 		struct dc_stream_update stream_update;
2073 	} *bundle;
2074 	int k, m;
2075 
2076 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2077 
2078 	if (!bundle) {
2079 		dm_error("Failed to allocate update bundle\n");
2080 		goto cleanup;
2081 	}
2082 
2083 	for (k = 0; k < dc_state->stream_count; k++) {
2084 		bundle->stream_update.stream = dc_state->streams[k];
2085 
2086 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2087 			bundle->surface_updates[m].surface =
2088 				dc_state->stream_status->plane_states[m];
2089 			bundle->surface_updates[m].surface->force_full_update =
2090 				true;
2091 		}
2092 		dc_commit_updates_for_stream(
2093 			dm->dc, bundle->surface_updates,
2094 			dc_state->stream_status->plane_count,
2095 			dc_state->streams[k], &bundle->stream_update, dc_state);
2096 	}
2097 
2098 cleanup:
2099 	kfree(bundle);
2100 
2101 	return;
2102 }
2103 
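/*
 * Blank @link without a full modeset: find the stream currently driving it
 * and commit a stream update with dpms_off set.
 */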
2104 static void dm_set_dpms_off(struct dc_link *link)
2105 {
2106 	struct dc_stream_state *stream_state;
2107 	struct amdgpu_dm_connector *aconnector = link->priv;
2108 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2109 	struct dc_stream_update stream_update;
2110 	bool dpms_off = true;
2111 
2112 	memset(&stream_update, 0, sizeof(stream_update));
2113 	stream_update.dpms_off = &dpms_off;
2114 
2115 	mutex_lock(&adev->dm.dc_lock);
2116 	stream_state = dc_stream_find_from_link(link);
2117 
2118 	if (stream_state == NULL) {
2119 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2120 		mutex_unlock(&adev->dm.dc_lock);
2121 		return;
2122 	}
2123 
2124 	stream_update.stream = stream_state;
2125 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2126 				     stream_state, &stream_update,
2127 				     stream_state->ctx->dc->current_state);
2128 	mutex_unlock(&adev->dm.dc_lock);
2129 }
2130 
2131 static int dm_resume(void *handle)
2132 {
2133 	struct amdgpu_device *adev = handle;
2134 	struct drm_device *ddev = adev_to_drm(adev);
2135 	struct amdgpu_display_manager *dm = &adev->dm;
2136 	struct amdgpu_dm_connector *aconnector;
2137 	struct drm_connector *connector;
2138 	struct drm_connector_list_iter iter;
2139 	struct drm_crtc *crtc;
2140 	struct drm_crtc_state *new_crtc_state;
2141 	struct dm_crtc_state *dm_new_crtc_state;
2142 	struct drm_plane *plane;
2143 	struct drm_plane_state *new_plane_state;
2144 	struct dm_plane_state *dm_new_plane_state;
2145 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2146 	enum dc_connection_type new_connection_type = dc_connection_none;
2147 	struct dc_state *dc_state;
2148 	int i, r, j;
2149 
2150 	if (amdgpu_in_reset(adev)) {
2151 		dc_state = dm->cached_dc_state;
2152 
2153 		r = dm_dmub_hw_init(adev);
2154 		if (r)
2155 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2156 
2157 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2158 		dc_resume(dm->dc);
2159 
2160 		amdgpu_dm_irq_resume_early(adev);
2161 
2162 		for (i = 0; i < dc_state->stream_count; i++) {
2163 			dc_state->streams[i]->mode_changed = true;
2164 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2165 				dc_state->stream_status->plane_states[j]->update_flags.raw
2166 					= 0xffffffff;
2167 			}
2168 		}
2169 
2170 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2171 
2172 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2173 
2174 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2175 
2176 		dc_release_state(dm->cached_dc_state);
2177 		dm->cached_dc_state = NULL;
2178 
2179 		amdgpu_dm_irq_resume_late(adev);
2180 
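		/* Drop the dc_lock taken in dm_suspend() when the reset began. */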
2181 		mutex_unlock(&dm->dc_lock);
2182 
2183 		return 0;
2184 	}
2185 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2186 	dc_release_state(dm_state->context);
2187 	dm_state->context = dc_create_state(dm->dc);
2188 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2189 	dc_resource_state_construct(dm->dc, dm_state->context);
2190 
2191 	/* Before powering on DC we need to re-initialize DMUB. */
2192 	r = dm_dmub_hw_init(adev);
2193 	if (r)
2194 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2195 
2196 	/* power on hardware */
2197 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2198 
2199 	/* program HPD filter */
2200 	dc_resume(dm->dc);
2201 
2202 	/*
2203 	 * Enable HPD Rx IRQ early; this should be done before the mode is set,
2204 	 * as short pulse interrupts are used for MST.
2205 	 */
2206 	amdgpu_dm_irq_resume_early(adev);
2207 
2208 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2209 	s3_handle_mst(ddev, false);
2210 
2211 	/* Do detection */
2212 	drm_connector_list_iter_begin(ddev, &iter);
2213 	drm_for_each_connector_iter(connector, &iter) {
2214 		aconnector = to_amdgpu_dm_connector(connector);
2215 
2216 		/*
2217 		 * This is the case when traversing through already created
2218 		 * MST connectors; they should be skipped.
2219 		 */
2220 		if (aconnector->mst_port)
2221 			continue;
2222 
2223 		mutex_lock(&aconnector->hpd_lock);
2224 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2225 			DRM_ERROR("KMS: Failed to detect connector\n");
2226 
2227 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2228 			emulated_link_detect(aconnector->dc_link);
2229 		else
2230 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2231 
2232 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2233 			aconnector->fake_enable = false;
2234 
2235 		if (aconnector->dc_sink)
2236 			dc_sink_release(aconnector->dc_sink);
2237 		aconnector->dc_sink = NULL;
2238 		amdgpu_dm_update_connector_after_detect(aconnector);
2239 		mutex_unlock(&aconnector->hpd_lock);
2240 	}
2241 	drm_connector_list_iter_end(&iter);
2242 
2243 	/* Force mode set in atomic commit */
2244 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2245 		new_crtc_state->active_changed = true;
2246 
2247 	/*
2248 	 * atomic_check is expected to create the dc states. We need to release
2249 	 * them here, since they were duplicated as part of the suspend
2250 	 * procedure.
2251 	 */
2252 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2253 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2254 		if (dm_new_crtc_state->stream) {
2255 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2256 			dc_stream_release(dm_new_crtc_state->stream);
2257 			dm_new_crtc_state->stream = NULL;
2258 		}
2259 	}
2260 
2261 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2262 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2263 		if (dm_new_plane_state->dc_state) {
2264 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2265 			dc_plane_state_release(dm_new_plane_state->dc_state);
2266 			dm_new_plane_state->dc_state = NULL;
2267 		}
2268 	}
2269 
2270 	drm_atomic_helper_resume(ddev, dm->cached_state);
2271 
2272 	dm->cached_state = NULL;
2273 
2274 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2275 	amdgpu_dm_crtc_secure_display_resume(adev);
2276 #endif
2277 
2278 	amdgpu_dm_irq_resume_late(adev);
2279 
2280 	amdgpu_dm_smu_write_watermarks_table(adev);
2281 
2282 	return 0;
2283 }
2284 
2285 /**
2286  * DOC: DM Lifecycle
2287  *
2288  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2289  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290  * the base driver's device list to be initialized and torn down accordingly.
2291  *
2292  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2293  */
2294 
2295 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2296 	.name = "dm",
2297 	.early_init = dm_early_init,
2298 	.late_init = dm_late_init,
2299 	.sw_init = dm_sw_init,
2300 	.sw_fini = dm_sw_fini,
2301 	.hw_init = dm_hw_init,
2302 	.hw_fini = dm_hw_fini,
2303 	.suspend = dm_suspend,
2304 	.resume = dm_resume,
2305 	.is_idle = dm_is_idle,
2306 	.wait_for_idle = dm_wait_for_idle,
2307 	.check_soft_reset = dm_check_soft_reset,
2308 	.soft_reset = dm_soft_reset,
2309 	.set_clockgating_state = dm_set_clockgating_state,
2310 	.set_powergating_state = dm_set_powergating_state,
2311 };
2312 
2313 const struct amdgpu_ip_block_version dm_ip_block =
2314 {
2315 	.type = AMD_IP_BLOCK_TYPE_DCE,
2316 	.major = 1,
2317 	.minor = 0,
2318 	.rev = 0,
2319 	.funcs = &amdgpu_dm_funcs,
2320 };
2321 
2322 
2323 /**
2324  * DOC: atomic
2325  *
2326  * *WIP*
2327  */
2328 
2329 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2330 	.fb_create = amdgpu_display_user_framebuffer_create,
2331 	.get_format_info = amd_get_format_info,
2332 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2333 	.atomic_check = amdgpu_dm_atomic_check,
2334 	.atomic_commit = drm_atomic_helper_commit,
2335 };
2336 
2337 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2339 };
2340 
2341 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2342 {
2343 	u32 max_cll, min_cll, max, min, q, r;
2344 	struct amdgpu_dm_backlight_caps *caps;
2345 	struct amdgpu_display_manager *dm;
2346 	struct drm_connector *conn_base;
2347 	struct amdgpu_device *adev;
2348 	struct dc_link *link = NULL;
2349 	static const u8 pre_computed_values[] = {
2350 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2351 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2352 
2353 	if (!aconnector || !aconnector->dc_link)
2354 		return;
2355 
2356 	link = aconnector->dc_link;
2357 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2358 		return;
2359 
2360 	conn_base = &aconnector->base;
2361 	adev = drm_to_adev(conn_base->dev);
2362 	dm = &adev->dm;
2363 	caps = &dm->backlight_caps;
2364 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2365 	caps->aux_support = false;
2366 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2367 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2368 
2369 	if (caps->ext_caps->bits.oled == 1 ||
2370 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2371 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2372 		caps->aux_support = true;
2373 
2374 	if (amdgpu_backlight == 0)
2375 		caps->aux_support = false;
2376 	else if (amdgpu_backlight == 1)
2377 		caps->aux_support = true;
2378 
2379 	/* From the specification (CTA-861-G), the maximum luminance is
2380 	 * calculated as:
2381 	 *	Luminance = 50*2**(CV/32)
2382 	 * where CV is a one-byte value.
2383 	 * Evaluating this expression directly would need floating point
2384 	 * precision; to avoid that, we exploit the fact that CV is divided
2385 	 * by a constant. By Euclid's division algorithm, CV can be written
2386 	 * as CV = 32*q + r. Substituting this into the Luminance expression
2387 	 * gives 50*(2**q)*(2**(r/32)), so we only need to pre-compute
2388 	 * 50*2**(r/32) for r in 0..31.
2389 	 * The pre-computation used the following Ruby line:
2390 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2391 	 * The results of the above expression can be verified against
2392 	 * pre_computed_values.
2393 	 */
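	/*
	 * Worked example: max_cll = 98 gives q = 3 and r = 2, so
	 * max = (1 << 3) * pre_computed_values[2] = 8 * 52 = 416 nits.
	 */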
2394 	q = max_cll >> 5;
2395 	r = max_cll % 32;
2396 	max = (1 << q) * pre_computed_values[r];
2397 
2398 	// min luminance: maxLum * (CV/255)^2 / 100
2399 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2400 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2401 
2402 	caps->aux_max_input_signal = max;
2403 	caps->aux_min_input_signal = min;
2404 }
2405 
2406 void amdgpu_dm_update_connector_after_detect(
2407 		struct amdgpu_dm_connector *aconnector)
2408 {
2409 	struct drm_connector *connector = &aconnector->base;
2410 	struct drm_device *dev = connector->dev;
2411 	struct dc_sink *sink;
2412 
2413 	/* MST handled by drm_mst framework */
2414 	if (aconnector->mst_mgr.mst_state)
2415 		return;
2416 
2417 	sink = aconnector->dc_link->local_sink;
2418 	if (sink)
2419 		dc_sink_retain(sink);
2420 
2421 	/*
2422 	 * An EDID-managed connector gets its first update only in the mode_valid
2423 	 * hook; the connector sink is then set to either the fake or the physical
2424 	 * sink depending on link status. Skip if already done during boot.
2425 	 */
2426 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2427 			&& aconnector->dc_em_sink) {
2428 
2429 		/*
2430 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake
2431 		 * a stream, because connector->sink is set to NULL on resume.
2432 		 */
2433 		mutex_lock(&dev->mode_config.mutex);
2434 
2435 		if (sink) {
2436 			if (aconnector->dc_sink) {
2437 				amdgpu_dm_update_freesync_caps(connector, NULL);
2438 				/*
2439 				 * The retain and release below bump the sink's refcount
2440 				 * because the link no longer points to it after disconnect;
2441 				 * otherwise the next crtc-to-connector reshuffle by the UMD
2442 				 * would trigger an unwanted dc_sink release.
2443 				 */
2444 				dc_sink_release(aconnector->dc_sink);
2445 			}
2446 			aconnector->dc_sink = sink;
2447 			dc_sink_retain(aconnector->dc_sink);
2448 			amdgpu_dm_update_freesync_caps(connector,
2449 					aconnector->edid);
2450 		} else {
2451 			amdgpu_dm_update_freesync_caps(connector, NULL);
2452 			if (!aconnector->dc_sink) {
2453 				aconnector->dc_sink = aconnector->dc_em_sink;
2454 				dc_sink_retain(aconnector->dc_sink);
2455 			}
2456 		}
2457 
2458 		mutex_unlock(&dev->mode_config.mutex);
2459 
2460 		if (sink)
2461 			dc_sink_release(sink);
2462 		return;
2463 	}
2464 
2465 	/*
2466 	 * TODO: temporary guard while looking for a proper fix;
2467 	 * if this sink is an MST sink, we should not do anything.
2468 	 */
2469 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2470 		dc_sink_release(sink);
2471 		return;
2472 	}
2473 
2474 	if (aconnector->dc_sink == sink) {
2475 		/*
2476 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2477 		 * Do nothing!!
2478 		 */
2479 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2480 				aconnector->connector_id);
2481 		if (sink)
2482 			dc_sink_release(sink);
2483 		return;
2484 	}
2485 
2486 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2487 		aconnector->connector_id, aconnector->dc_sink, sink);
2488 
2489 	mutex_lock(&dev->mode_config.mutex);
2490 
2491 	/*
2492 	 * 1. Update status of the drm connector
2493 	 * 2. Send an event and let userspace tell us what to do
2494 	 */
2495 	if (sink) {
2496 		/*
2497 		 * TODO: check if we still need the S3 mode update workaround.
2498 		 * If yes, put it here.
2499 		 */
2500 		if (aconnector->dc_sink) {
2501 			amdgpu_dm_update_freesync_caps(connector, NULL);
2502 			dc_sink_release(aconnector->dc_sink);
2503 		}
2504 
2505 		aconnector->dc_sink = sink;
2506 		dc_sink_retain(aconnector->dc_sink);
2507 		if (sink->dc_edid.length == 0) {
2508 			aconnector->edid = NULL;
2509 			if (aconnector->dc_link->aux_mode) {
2510 				drm_dp_cec_unset_edid(
2511 					&aconnector->dm_dp_aux.aux);
2512 			}
2513 		} else {
2514 			aconnector->edid =
2515 				(struct edid *)sink->dc_edid.raw_edid;
2516 
2517 			drm_connector_update_edid_property(connector,
2518 							   aconnector->edid);
2519 			if (aconnector->dc_link->aux_mode)
2520 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2521 						    aconnector->edid);
2522 		}
2523 
2524 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2525 		update_connector_ext_caps(aconnector);
2526 	} else {
2527 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2528 		amdgpu_dm_update_freesync_caps(connector, NULL);
2529 		drm_connector_update_edid_property(connector, NULL);
2530 		aconnector->num_modes = 0;
2531 		dc_sink_release(aconnector->dc_sink);
2532 		aconnector->dc_sink = NULL;
2533 		aconnector->edid = NULL;
2534 #ifdef CONFIG_DRM_AMD_DC_HDCP
2535 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2536 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2537 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2538 #endif
2539 	}
2540 
2541 	mutex_unlock(&dev->mode_config.mutex);
2542 
2543 	update_subconnector_property(aconnector);
2544 
2545 	if (sink)
2546 		dc_sink_release(sink);
2547 }
2548 
2549 static void handle_hpd_irq(void *param)
2550 {
2551 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2552 	struct drm_connector *connector = &aconnector->base;
2553 	struct drm_device *dev = connector->dev;
2554 	enum dc_connection_type new_connection_type = dc_connection_none;
2555 #ifdef CONFIG_DRM_AMD_DC_HDCP
2556 	struct amdgpu_device *adev = drm_to_adev(dev);
2557 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2558 #endif
2559 
2560 	/*
2561 	 * In case of failure, or for MST, there is no need to update the connector
2562 	 * status or notify the OS, since MST does this in its own context.
2563 	 */
2564 	mutex_lock(&aconnector->hpd_lock);
2565 
2566 #ifdef CONFIG_DRM_AMD_DC_HDCP
2567 	if (adev->dm.hdcp_workqueue) {
2568 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2569 		dm_con_state->update_hdcp = true;
2570 	}
2571 #endif
2572 	if (aconnector->fake_enable)
2573 		aconnector->fake_enable = false;
2574 
2575 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2576 		DRM_ERROR("KMS: Failed to detect connector\n");
2577 
2578 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2579 		emulated_link_detect(aconnector->dc_link);
2580 
2581 
2582 		drm_modeset_lock_all(dev);
2583 		dm_restore_drm_connector_state(dev, connector);
2584 		drm_modeset_unlock_all(dev);
2585 
2586 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2587 			drm_kms_helper_hotplug_event(dev);
2588 
2589 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2590 		if (new_connection_type == dc_connection_none &&
2591 		    aconnector->dc_link->type == dc_connection_none)
2592 			dm_set_dpms_off(aconnector->dc_link);
2593 
2594 		amdgpu_dm_update_connector_after_detect(aconnector);
2595 
2596 		drm_modeset_lock_all(dev);
2597 		dm_restore_drm_connector_state(dev, connector);
2598 		drm_modeset_unlock_all(dev);
2599 
2600 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2601 			drm_kms_helper_hotplug_event(dev);
2602 	}
2603 	mutex_unlock(&aconnector->hpd_lock);
2604 
2605 }
2606 
2607 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2608 {
2609 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2610 	uint8_t dret;
2611 	bool new_irq_handled = false;
2612 	int dpcd_addr;
2613 	int dpcd_bytes_to_read;
2614 
2615 	const int max_process_count = 30;
2616 	int process_count = 0;
2617 
2618 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2619 
2620 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2621 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2622 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2623 		dpcd_addr = DP_SINK_COUNT;
2624 	} else {
2625 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2626 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2627 		dpcd_addr = DP_SINK_COUNT_ESI;
2628 	}
2629 
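	/*
	 * Read the IRQ vector and let the MST manager service it, repeating
	 * while the sink keeps raising new IRQs (up to max_process_count) and
	 * ACKing each handled interrupt back to the sink at dpcd_addr + 1.
	 */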
2630 	dret = drm_dp_dpcd_read(
2631 		&aconnector->dm_dp_aux.aux,
2632 		dpcd_addr,
2633 		esi,
2634 		dpcd_bytes_to_read);
2635 
2636 	while (dret == dpcd_bytes_to_read &&
2637 		process_count < max_process_count) {
2638 		uint8_t retry;
2639 		dret = 0;
2640 
2641 		process_count++;
2642 
2643 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2644 		/* handle HPD short pulse irq */
2645 		if (aconnector->mst_mgr.mst_state)
2646 			drm_dp_mst_hpd_irq(
2647 				&aconnector->mst_mgr,
2648 				esi,
2649 				&new_irq_handled);
2650 
2651 		if (new_irq_handled) {
2652 			/* ACK at DPCD to notify downstream */
2653 			const int ack_dpcd_bytes_to_write =
2654 				dpcd_bytes_to_read - 1;
2655 
2656 			for (retry = 0; retry < 3; retry++) {
2657 				uint8_t wret;
2658 
2659 				wret = drm_dp_dpcd_write(
2660 					&aconnector->dm_dp_aux.aux,
2661 					dpcd_addr + 1,
2662 					&esi[1],
2663 					ack_dpcd_bytes_to_write);
2664 				if (wret == ack_dpcd_bytes_to_write)
2665 					break;
2666 			}
2667 
2668 			/* check if there is new irq to be handled */
2669 			dret = drm_dp_dpcd_read(
2670 				&aconnector->dm_dp_aux.aux,
2671 				dpcd_addr,
2672 				esi,
2673 				dpcd_bytes_to_read);
2674 
2675 			new_irq_handled = false;
2676 		} else {
2677 			break;
2678 		}
2679 	}
2680 
2681 	if (process_count == max_process_count)
2682 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2683 }
2684 
2685 static void handle_hpd_rx_irq(void *param)
2686 {
2687 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2688 	struct drm_connector *connector = &aconnector->base;
2689 	struct drm_device *dev = connector->dev;
2690 	struct dc_link *dc_link = aconnector->dc_link;
2691 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2692 	bool result = false;
2693 	enum dc_connection_type new_connection_type = dc_connection_none;
2694 	struct amdgpu_device *adev = drm_to_adev(dev);
2695 	union hpd_irq_data hpd_irq_data;
2696 
2697 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2698 
2699 	/*
2700 	 * TODO: Temporarily take a mutex so the hpd interrupt does not run into
2701 	 * a gpio conflict; once an i2c helper is implemented, this mutex should
2702 	 * be retired.
2703 	 */
2704 	if (dc_link->type != dc_connection_mst_branch)
2705 		mutex_lock(&aconnector->hpd_lock);
2706 
2707 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2708 
2709 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2710 		(dc_link->type == dc_connection_mst_branch)) {
2711 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2712 			result = true;
2713 			dm_handle_hpd_rx_irq(aconnector);
2714 			goto out;
2715 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2716 			result = false;
2717 			dm_handle_hpd_rx_irq(aconnector);
2718 			goto out;
2719 		}
2720 	}
2721 
2722 	mutex_lock(&adev->dm.dc_lock);
2723 #ifdef CONFIG_DRM_AMD_DC_HDCP
2724 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2725 #else
2726 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2727 #endif
2728 	mutex_unlock(&adev->dm.dc_lock);
2729 
2730 out:
2731 	if (result && !is_mst_root_connector) {
2732 		/* Downstream Port status changed. */
2733 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2734 			DRM_ERROR("KMS: Failed to detect connector\n");
2735 
2736 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2737 			emulated_link_detect(dc_link);
2738 
2739 			if (aconnector->fake_enable)
2740 				aconnector->fake_enable = false;
2741 
2742 			amdgpu_dm_update_connector_after_detect(aconnector);
2743 
2744 
2745 			drm_modeset_lock_all(dev);
2746 			dm_restore_drm_connector_state(dev, connector);
2747 			drm_modeset_unlock_all(dev);
2748 
2749 			drm_kms_helper_hotplug_event(dev);
2750 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2751 
2752 			if (aconnector->fake_enable)
2753 				aconnector->fake_enable = false;
2754 
2755 			amdgpu_dm_update_connector_after_detect(aconnector);
2756 
2757 
2758 			drm_modeset_lock_all(dev);
2759 			dm_restore_drm_connector_state(dev, connector);
2760 			drm_modeset_unlock_all(dev);
2761 
2762 			drm_kms_helper_hotplug_event(dev);
2763 		}
2764 	}
2765 #ifdef CONFIG_DRM_AMD_DC_HDCP
2766 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2767 		if (adev->dm.hdcp_workqueue)
2768 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2769 	}
2770 #endif
2771 
2772 	if (dc_link->type != dc_connection_mst_branch) {
2773 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2774 		mutex_unlock(&aconnector->hpd_lock);
2775 	}
2776 }
2777 
2778 static void register_hpd_handlers(struct amdgpu_device *adev)
2779 {
2780 	struct drm_device *dev = adev_to_drm(adev);
2781 	struct drm_connector *connector;
2782 	struct amdgpu_dm_connector *aconnector;
2783 	const struct dc_link *dc_link;
2784 	struct dc_interrupt_params int_params = {0};
2785 
2786 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2787 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2788 
2789 	list_for_each_entry(connector,
2790 			&dev->mode_config.connector_list, head) {
2791 
2792 		aconnector = to_amdgpu_dm_connector(connector);
2793 		dc_link = aconnector->dc_link;
2794 
2795 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2796 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2797 			int_params.irq_source = dc_link->irq_source_hpd;
2798 
2799 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2800 					handle_hpd_irq,
2801 					(void *) aconnector);
2802 		}
2803 
2804 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2805 
2806 			/* Also register for DP short pulse (hpd_rx). */
2807 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2808 			int_params.irq_source = dc_link->irq_source_hpd_rx;
2809 
2810 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2811 					handle_hpd_rx_irq,
2812 					(void *) aconnector);
2813 		}
2814 	}
2815 }
2816 
2817 #if defined(CONFIG_DRM_AMD_DC_SI)
2818 /* Register IRQ sources and initialize IRQ callbacks */
2819 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2820 {
2821 	struct dc *dc = adev->dm.dc;
2822 	struct common_irq_params *c_irq_params;
2823 	struct dc_interrupt_params int_params = {0};
2824 	int r;
2825 	int i;
2826 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2827 
2828 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2829 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2830 
2831 	/*
2832 	 * Actions of amdgpu_irq_add_id():
2833 	 * 1. Register a set() function with base driver.
2834 	 *    Base driver will call set() function to enable/disable an
2835 	 *    interrupt in DC hardware.
2836 	 * 2. Register amdgpu_dm_irq_handler().
2837 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2838 	 *    coming from DC hardware.
2839 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2840 	 *    for acknowledging and handling. */
2841 
2842 	/* Use VBLANK interrupt */
2843 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2844 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2845 		if (r) {
2846 			DRM_ERROR("Failed to add crtc irq id!\n");
2847 			return r;
2848 		}
2849 
2850 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2851 		int_params.irq_source =
2852 			dc_interrupt_to_irq_source(dc, i + 1, 0);
2853 
2854 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2855 
2856 		c_irq_params->adev = adev;
2857 		c_irq_params->irq_src = int_params.irq_source;
2858 
2859 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2860 				dm_crtc_high_irq, c_irq_params);
2861 	}
2862 
2863 	/* Use GRPH_PFLIP interrupt */
2864 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2865 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2866 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2867 		if (r) {
2868 			DRM_ERROR("Failed to add page flip irq id!\n");
2869 			return r;
2870 		}
2871 
2872 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2873 		int_params.irq_source =
2874 			dc_interrupt_to_irq_source(dc, i, 0);
2875 
2876 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2877 
2878 		c_irq_params->adev = adev;
2879 		c_irq_params->irq_src = int_params.irq_source;
2880 
2881 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2882 				dm_pflip_high_irq, c_irq_params);
2883 
2884 	}
2885 
2886 	/* HPD */
2887 	r = amdgpu_irq_add_id(adev, client_id,
2888 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2889 	if (r) {
2890 		DRM_ERROR("Failed to add hpd irq id!\n");
2891 		return r;
2892 	}
2893 
2894 	register_hpd_handlers(adev);
2895 
2896 	return 0;
2897 }
2898 #endif
2899 
2900 /* Register IRQ sources and initialize IRQ callbacks */
2901 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2902 {
2903 	struct dc *dc = adev->dm.dc;
2904 	struct common_irq_params *c_irq_params;
2905 	struct dc_interrupt_params int_params = {0};
2906 	int r;
2907 	int i;
2908 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2909 
2910 	if (adev->asic_type >= CHIP_VEGA10)
2911 		client_id = SOC15_IH_CLIENTID_DCE;
2912 
2913 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2914 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2915 
2916 	/*
2917 	 * Actions of amdgpu_irq_add_id():
2918 	 * 1. Register a set() function with base driver.
2919 	 *    Base driver will call set() function to enable/disable an
2920 	 *    interrupt in DC hardware.
2921 	 * 2. Register amdgpu_dm_irq_handler().
2922 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2923 	 *    coming from DC hardware.
2924 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2925 	 *    for acknowledging and handling. */
2926 
2927 	/* Use VBLANK interrupt */
2928 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2929 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2930 		if (r) {
2931 			DRM_ERROR("Failed to add crtc irq id!\n");
2932 			return r;
2933 		}
2934 
2935 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2936 		int_params.irq_source =
2937 			dc_interrupt_to_irq_source(dc, i, 0);
2938 
2939 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2940 
2941 		c_irq_params->adev = adev;
2942 		c_irq_params->irq_src = int_params.irq_source;
2943 
2944 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2945 				dm_crtc_high_irq, c_irq_params);
2946 	}
2947 
2948 	/* Use VUPDATE interrupt */
2949 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2950 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2951 		if (r) {
2952 			DRM_ERROR("Failed to add vupdate irq id!\n");
2953 			return r;
2954 		}
2955 
2956 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2957 		int_params.irq_source =
2958 			dc_interrupt_to_irq_source(dc, i, 0);
2959 
2960 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2961 
2962 		c_irq_params->adev = adev;
2963 		c_irq_params->irq_src = int_params.irq_source;
2964 
2965 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2966 				dm_vupdate_high_irq, c_irq_params);
2967 	}
2968 
2969 	/* Use GRPH_PFLIP interrupt */
2970 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2971 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2972 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2973 		if (r) {
2974 			DRM_ERROR("Failed to add page flip irq id!\n");
2975 			return r;
2976 		}
2977 
2978 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2979 		int_params.irq_source =
2980 			dc_interrupt_to_irq_source(dc, i, 0);
2981 
2982 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2983 
2984 		c_irq_params->adev = adev;
2985 		c_irq_params->irq_src = int_params.irq_source;
2986 
2987 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2988 				dm_pflip_high_irq, c_irq_params);
2989 
2990 	}
2991 
2992 	/* HPD */
2993 	r = amdgpu_irq_add_id(adev, client_id,
2994 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2995 	if (r) {
2996 		DRM_ERROR("Failed to add hpd irq id!\n");
2997 		return r;
2998 	}
2999 
3000 	register_hpd_handlers(adev);
3001 
3002 	return 0;
3003 }
3004 
3005 #if defined(CONFIG_DRM_AMD_DC_DCN)
3006 /* Register IRQ sources and initialize IRQ callbacks */
3007 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3008 {
3009 	struct dc *dc = adev->dm.dc;
3010 	struct common_irq_params *c_irq_params;
3011 	struct dc_interrupt_params int_params = {0};
3012 	int r;
3013 	int i;
3014 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3015 	static const unsigned int vrtl_int_srcid[] = {
3016 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3017 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3018 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3019 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3020 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3021 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3022 	};
3023 #endif
3024 
3025 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3026 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3027 
3028 	/*
3029 	 * Actions of amdgpu_irq_add_id():
3030 	 * 1. Register a set() function with base driver.
3031 	 *    Base driver will call set() function to enable/disable an
3032 	 *    interrupt in DC hardware.
3033 	 * 2. Register amdgpu_dm_irq_handler().
3034 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3035 	 *    coming from DC hardware.
3036 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3037 	 *    for acknowledging and handling.
3038 	 */
3039 
3040 	/* Use VSTARTUP interrupt */
3041 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3042 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3043 			i++) {
3044 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3045 
3046 		if (r) {
3047 			DRM_ERROR("Failed to add crtc irq id!\n");
3048 			return r;
3049 		}
3050 
3051 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3052 		int_params.irq_source =
3053 			dc_interrupt_to_irq_source(dc, i, 0);
3054 
3055 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3056 
3057 		c_irq_params->adev = adev;
3058 		c_irq_params->irq_src = int_params.irq_source;
3059 
3060 		amdgpu_dm_irq_register_interrupt(
3061 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3062 	}
3063 
3064 	/* Use otg vertical line interrupt */
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3067 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3068 				vrtl_int_srcid[i], &adev->vline0_irq);
3069 
3070 		if (r) {
3071 			DRM_ERROR("Failed to add vline0 irq id!\n");
3072 			return r;
3073 		}
3074 
3075 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3076 		int_params.irq_source =
3077 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3078 
3079 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3080 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3081 			break;
3082 		}
3083 
3084 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3085 					- DC_IRQ_SOURCE_DC1_VLINE0];
3086 
3087 		c_irq_params->adev = adev;
3088 		c_irq_params->irq_src = int_params.irq_source;
3089 
3090 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3091 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3092 	}
3093 #endif
3094 
3095 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3096 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3097 	 * to trigger at end of each vblank, regardless of state of the lock,
3098 	 * matching DCE behaviour.
3099 	 */
3100 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3101 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3102 	     i++) {
3103 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3104 
3105 		if (r) {
3106 			DRM_ERROR("Failed to add vupdate irq id!\n");
3107 			return r;
3108 		}
3109 
3110 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3111 		int_params.irq_source =
3112 			dc_interrupt_to_irq_source(dc, i, 0);
3113 
3114 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3115 
3116 		c_irq_params->adev = adev;
3117 		c_irq_params->irq_src = int_params.irq_source;
3118 
3119 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3120 				dm_vupdate_high_irq, c_irq_params);
3121 	}
3122 
3123 	/* Use GRPH_PFLIP interrupt */
3124 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3125 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3126 			i++) {
3127 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3128 		if (r) {
3129 			DRM_ERROR("Failed to add page flip irq id!\n");
3130 			return r;
3131 		}
3132 
3133 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3134 		int_params.irq_source =
3135 			dc_interrupt_to_irq_source(dc, i, 0);
3136 
3137 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3138 
3139 		c_irq_params->adev = adev;
3140 		c_irq_params->irq_src = int_params.irq_source;
3141 
3142 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3143 				dm_pflip_high_irq, c_irq_params);
3144 
3145 	}
3146 
3147 	if (dc->ctx->dmub_srv) {
3148 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3149 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3150 
3151 		if (r) {
3152 			DRM_ERROR("Failed to add dmub trace irq id!\n");
3153 			return r;
3154 		}
3155 
3156 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3157 		int_params.irq_source =
3158 			dc_interrupt_to_irq_source(dc, i, 0);
3159 
3160 		c_irq_params = &adev->dm.dmub_trace_params[0];
3161 
3162 		c_irq_params->adev = adev;
3163 		c_irq_params->irq_src = int_params.irq_source;
3164 
3165 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3166 				dm_dmub_trace_high_irq, c_irq_params);
3167 	}
3168 
3169 	/* HPD */
3170 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3171 			&adev->hpd_irq);
3172 	if (r) {
3173 		DRM_ERROR("Failed to add hpd irq id!\n");
3174 		return r;
3175 	}
3176 
3177 	register_hpd_handlers(adev);
3178 
3179 	return 0;
3180 }
3181 #endif
3182 
3183 /*
3184  * Acquires the lock for the atomic state object and returns
3185  * the new atomic state.
3186  *
3187  * This should only be called during atomic check.
3188  */
3189 static int dm_atomic_get_state(struct drm_atomic_state *state,
3190 			       struct dm_atomic_state **dm_state)
3191 {
3192 	struct drm_device *dev = state->dev;
3193 	struct amdgpu_device *adev = drm_to_adev(dev);
3194 	struct amdgpu_display_manager *dm = &adev->dm;
3195 	struct drm_private_state *priv_state;
3196 
3197 	if (*dm_state)
3198 		return 0;
3199 
3200 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3201 	if (IS_ERR(priv_state))
3202 		return PTR_ERR(priv_state);
3203 
3204 	*dm_state = to_dm_atomic_state(priv_state);
3205 
3206 	return 0;
3207 }
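
/*
 * A minimal usage sketch for atomic check, assuming the caller keeps a
 * dm_state pointer initialized to NULL:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context may now be modified under the acquired lock
 */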
3208 
3209 static struct dm_atomic_state *
3210 dm_atomic_get_new_state(struct drm_atomic_state *state)
3211 {
3212 	struct drm_device *dev = state->dev;
3213 	struct amdgpu_device *adev = drm_to_adev(dev);
3214 	struct amdgpu_display_manager *dm = &adev->dm;
3215 	struct drm_private_obj *obj;
3216 	struct drm_private_state *new_obj_state;
3217 	int i;
3218 
3219 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3220 		if (obj->funcs == dm->atomic_obj.funcs)
3221 			return to_dm_atomic_state(new_obj_state);
3222 	}
3223 
3224 	return NULL;
3225 }
3226 
3227 static struct drm_private_state *
3228 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3229 {
3230 	struct dm_atomic_state *old_state, *new_state;
3231 
3232 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3233 	if (!new_state)
3234 		return NULL;
3235 
3236 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3237 
3238 	old_state = to_dm_atomic_state(obj->state);
3239 
3240 	if (old_state && old_state->context)
3241 		new_state->context = dc_copy_state(old_state->context);
3242 
3243 	if (!new_state->context) {
3244 		kfree(new_state);
3245 		return NULL;
3246 	}
3247 
3248 	return &new_state->base;
3249 }
3250 
3251 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3252 				    struct drm_private_state *state)
3253 {
3254 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3255 
3256 	if (dm_state && dm_state->context)
3257 		dc_release_state(dm_state->context);
3258 
3259 	kfree(dm_state);
3260 }
3261 
3262 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3263 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3264 	.atomic_destroy_state = dm_atomic_destroy_state,
3265 };
3266 
3267 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3268 {
3269 	struct dm_atomic_state *state;
3270 	int r;
3271 
3272 	adev->mode_info.mode_config_initialized = true;
3273 
3274 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3275 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3276 
3277 	adev_to_drm(adev)->mode_config.max_width = 16384;
3278 	adev_to_drm(adev)->mode_config.max_height = 16384;
3279 
3280 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3281 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3282 	/* indicates support for immediate flip */
3283 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3284 
3285 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3286 
3287 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3288 	if (!state)
3289 		return -ENOMEM;
3290 
3291 	state->context = dc_create_state(adev->dm.dc);
3292 	if (!state->context) {
3293 		kfree(state);
3294 		return -ENOMEM;
3295 	}
3296 
3297 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3298 
3299 	drm_atomic_private_obj_init(adev_to_drm(adev),
3300 				    &adev->dm.atomic_obj,
3301 				    &state->base,
3302 				    &dm_atomic_state_funcs);
3303 
3304 	r = amdgpu_display_modeset_create_props(adev);
3305 	if (r) {
3306 		dc_release_state(state->context);
3307 		kfree(state);
3308 		return r;
3309 	}
3310 
3311 	r = amdgpu_dm_audio_init(adev);
3312 	if (r) {
3313 		dc_release_state(state->context);
3314 		kfree(state);
3315 		return r;
3316 	}
3317 
3318 	return 0;
3319 }
3320 
3321 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3322 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3323 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3324 
3325 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3326 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3327 
3328 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3329 {
3330 #if defined(CONFIG_ACPI)
3331 	struct amdgpu_dm_backlight_caps caps;
3332 
3333 	memset(&caps, 0, sizeof(caps));
3334 
3335 	if (dm->backlight_caps.caps_valid)
3336 		return;
3337 
3338 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3339 	if (caps.caps_valid) {
3340 		dm->backlight_caps.caps_valid = true;
3341 		if (caps.aux_support)
3342 			return;
3343 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3344 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3345 	} else {
3346 		dm->backlight_caps.min_input_signal =
3347 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3348 		dm->backlight_caps.max_input_signal =
3349 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3350 	}
3351 #else
3352 	if (dm->backlight_caps.aux_support)
3353 		return;
3354 
3355 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3356 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3357 #endif
3358 }
3359 
3360 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3361 				unsigned *min, unsigned *max)
3362 {
3363 	if (!caps)
3364 		return 0;
3365 
3366 	if (caps->aux_support) {
3367 		// Firmware limits are in nits, DC API wants millinits.
3368 		*max = 1000 * caps->aux_max_input_signal;
3369 		*min = 1000 * caps->aux_min_input_signal;
3370 	} else {
3371 		// Firmware limits are 8-bit, PWM control is 16-bit.
3372 		*max = 0x101 * caps->max_input_signal;
3373 		*min = 0x101 * caps->min_input_signal;
3374 	}
3375 	return 1;
3376 }
3377 
3378 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3379 					uint32_t brightness)
3380 {
3381 	unsigned min, max;
3382 
3383 	if (!get_brightness_range(caps, &min, &max))
3384 		return brightness;
3385 
3386 	// Rescale 0..255 to min..max
3387 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3388 				       AMDGPU_MAX_BL_LEVEL);
3389 }
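
/*
 * Worked example (PWM path, assuming the default 12..255 firmware limits):
 * min = 0x101 * 12 = 3084, max = 0x101 * 255 = 65535, so a user brightness
 * of 128 maps to 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432.
 */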
3390 
3391 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3392 				      uint32_t brightness)
3393 {
3394 	unsigned min, max;
3395 
3396 	if (!get_brightness_range(caps, &min, &max))
3397 		return brightness;
3398 
3399 	if (brightness < min)
3400 		return 0;
3401 	// Rescale min..max to 0..255
3402 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3403 				 max - min);
3404 }
3405 
3406 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3407 {
3408 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3409 	struct amdgpu_dm_backlight_caps caps;
3410 	struct dc_link *link = NULL;
3411 	u32 brightness;
3412 	bool rc;
3413 
3414 	amdgpu_dm_update_backlight_caps(dm);
3415 	caps = dm->backlight_caps;
3416 
3417 	link = (struct dc_link *)dm->backlight_link;
3418 
3419 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3420 	// Change brightness based on AUX property
3421 	if (caps.aux_support)
3422 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3423 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3424 	else
3425 		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3426 
3427 	return rc ? 0 : 1;
3428 }
3429 
3430 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3431 {
3432 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3433 	struct amdgpu_dm_backlight_caps caps;
3434 
3435 	amdgpu_dm_update_backlight_caps(dm);
3436 	caps = dm->backlight_caps;
3437 
3438 	if (caps.aux_support) {
3439 		struct dc_link *link = (struct dc_link *)dm->backlight_link;
3440 		u32 avg, peak;
3441 		bool rc;
3442 
3443 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3444 		if (!rc)
3445 			return bd->props.brightness;
3446 		return convert_brightness_to_user(&caps, avg);
3447 	} else {
3448 		int ret = dc_link_get_backlight_level(dm->backlight_link);
3449 
3450 		if (ret == DC_ERROR_UNEXPECTED)
3451 			return bd->props.brightness;
3452 		return convert_brightness_to_user(&caps, ret);
3453 	}
3454 }
3455 
3456 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3457 	.options = BL_CORE_SUSPENDRESUME,
3458 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3459 	.update_status	= amdgpu_dm_backlight_update_status,
3460 };
3461 
3462 static void
3463 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3464 {
3465 	char bl_name[16];
3466 	struct backlight_properties props = { 0 };
3467 
3468 	amdgpu_dm_update_backlight_caps(dm);
3469 
3470 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3471 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3472 	props.type = BACKLIGHT_RAW;
3473 
3474 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3475 		 adev_to_drm(dm->adev)->primary->index);
3476 
3477 	dm->backlight_dev = backlight_device_register(bl_name,
3478 						      adev_to_drm(dm->adev)->dev,
3479 						      dm,
3480 						      &amdgpu_dm_backlight_ops,
3481 						      &props);
3482 
3483 	if (IS_ERR(dm->backlight_dev))
3484 		DRM_ERROR("DM: Backlight registration failed!\n");
3485 	else
3486 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3487 }
3488 
3489 #endif
3490 
3491 static int initialize_plane(struct amdgpu_display_manager *dm,
3492 			    struct amdgpu_mode_info *mode_info, int plane_id,
3493 			    enum drm_plane_type plane_type,
3494 			    const struct dc_plane_cap *plane_cap)
3495 {
3496 	struct drm_plane *plane;
3497 	unsigned long possible_crtcs;
3498 	int ret = 0;
3499 
3500 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3501 	if (!plane) {
3502 		DRM_ERROR("KMS: Failed to allocate plane\n");
3503 		return -ENOMEM;
3504 	}
3505 	plane->type = plane_type;
3506 
3507 	/*
3508 	 * HACK: IGT tests expect that the primary plane for a CRTC
3509 	 * can only have one possible CRTC. Only expose support for
3510 	 * any CRTC only to planes that will never be used as a primary
3511 	 * plane - i.e. overlay or underlay planes.
3512 	 */
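	/*
	 * Example (assuming max_streams == 4): plane 0 gets
	 * possible_crtcs == 0x1 (CRTC 0 only) and plane 3 gets 0x8,
	 * while an overlay plane with plane_id == 4 gets 0xff and may
	 * attach to any CRTC.
	 */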
3513 	possible_crtcs = 1 << plane_id;
3514 	if (plane_id >= dm->dc->caps.max_streams)
3515 		possible_crtcs = 0xff;
3516 
3517 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3518 
3519 	if (ret) {
3520 		DRM_ERROR("KMS: Failed to initialize plane\n");
3521 		kfree(plane);
3522 		return ret;
3523 	}
3524 
3525 	if (mode_info)
3526 		mode_info->planes[plane_id] = plane;
3527 
3528 	return ret;
3529 }
3530 
3532 static void register_backlight_device(struct amdgpu_display_manager *dm,
3533 				      struct dc_link *link)
3534 {
3535 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3536 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3537 
3538 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3539 	    link->type != dc_connection_none) {
3540 		/*
3541 		 * Even if registration fails, we should continue with
3542 		 * DM initialization because not having a backlight control
3543 		 * is better than a black screen.
3544 		 */
3545 		amdgpu_dm_register_backlight_device(dm);
3546 
3547 		if (dm->backlight_dev)
3548 			dm->backlight_link = link;
3549 	}
3550 #endif
3551 }
3552 
3554 /*
3555  * In this architecture, the association
3556  * connector -> encoder -> crtc
3557  * is not really required. The crtc and connector will hold the
3558  * display_index as an abstraction to use with the DAL component.
3559  *
3560  * Returns 0 on success
3561  */
3562 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3563 {
3564 	struct amdgpu_display_manager *dm = &adev->dm;
3565 	int32_t i;
3566 	struct amdgpu_dm_connector *aconnector = NULL;
3567 	struct amdgpu_encoder *aencoder = NULL;
3568 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3569 	uint32_t link_cnt;
3570 	int32_t primary_planes;
3571 	enum dc_connection_type new_connection_type = dc_connection_none;
3572 	const struct dc_plane_cap *plane;
3573 
3574 	dm->display_indexes_num = dm->dc->caps.max_streams;
3575 	/* Update the actual number of CRTCs in use */
3576 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3577 
3578 	link_cnt = dm->dc->caps.max_links;
3579 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3580 		DRM_ERROR("DM: Failed to initialize mode config\n");
3581 		return -EINVAL;
3582 	}
3583 
3584 	/* There is one primary plane per CRTC */
3585 	primary_planes = dm->dc->caps.max_streams;
3586 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3587 
3588 	/*
3589 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
3590 	 * Order is reversed to match iteration order in atomic check.
3591 	 */
3592 	for (i = (primary_planes - 1); i >= 0; i--) {
3593 		plane = &dm->dc->caps.planes[i];
3594 
3595 		if (initialize_plane(dm, mode_info, i,
3596 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3597 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3598 			goto fail;
3599 		}
3600 	}
3601 
3602 	/*
3603 	 * Initialize overlay planes, index starting after primary planes.
3604 	 * These planes have a higher DRM index than the primary planes since
3605 	 * they should be considered as having a higher z-order.
3606 	 * Order is reversed to match iteration order in atomic check.
3607 	 *
3608 	 * Only support DCN for now, and only expose one so we don't encourage
3609 	 * userspace to use up all the pipes.
3610 	 */
3611 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3612 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3613 
3614 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3615 			continue;
3616 
3617 		if (!plane->blends_with_above || !plane->blends_with_below)
3618 			continue;
3619 
3620 		if (!plane->pixel_format_support.argb8888)
3621 			continue;
3622 
3623 		if (initialize_plane(dm, NULL, primary_planes + i,
3624 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3625 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3626 			goto fail;
3627 		}
3628 
3629 		/* Only create one overlay plane. */
3630 		break;
3631 	}
3632 
3633 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3634 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3635 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3636 			goto fail;
3637 		}
3638 
3639 	/* loops over all connectors on the board */
3640 	for (i = 0; i < link_cnt; i++) {
3641 		struct dc_link *link = NULL;
3642 
3643 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3644 			DRM_ERROR(
3645 				"KMS: Cannot support more than %d display indexes\n",
3646 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3647 			continue;
3648 		}
3649 
3650 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3651 		if (!aconnector)
3652 			goto fail;
3653 
3654 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3655 		if (!aencoder)
3656 			goto fail;
3657 
3658 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3659 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3660 			goto fail;
3661 		}
3662 
3663 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3664 			DRM_ERROR("KMS: Failed to initialize connector\n");
3665 			goto fail;
3666 		}
3667 
3668 		link = dc_get_link_at_index(dm->dc, i);
3669 
3670 		if (!dc_link_detect_sink(link, &new_connection_type))
3671 			DRM_ERROR("KMS: Failed to detect connector\n");
3672 
3673 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3674 			emulated_link_detect(link);
3675 			amdgpu_dm_update_connector_after_detect(aconnector);
3676 
3677 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3678 			amdgpu_dm_update_connector_after_detect(aconnector);
3679 			register_backlight_device(dm, link);
3680 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3681 				amdgpu_dm_set_psr_caps(link);
3682 		}
3683 
3685 	}
3686 
3687 	/* Software is initialized. Now we can register interrupt handlers. */
3688 	switch (adev->asic_type) {
3689 #if defined(CONFIG_DRM_AMD_DC_SI)
3690 	case CHIP_TAHITI:
3691 	case CHIP_PITCAIRN:
3692 	case CHIP_VERDE:
3693 	case CHIP_OLAND:
3694 		if (dce60_register_irq_handlers(dm->adev)) {
3695 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3696 			goto fail;
3697 		}
3698 		break;
3699 #endif
3700 	case CHIP_BONAIRE:
3701 	case CHIP_HAWAII:
3702 	case CHIP_KAVERI:
3703 	case CHIP_KABINI:
3704 	case CHIP_MULLINS:
3705 	case CHIP_TONGA:
3706 	case CHIP_FIJI:
3707 	case CHIP_CARRIZO:
3708 	case CHIP_STONEY:
3709 	case CHIP_POLARIS11:
3710 	case CHIP_POLARIS10:
3711 	case CHIP_POLARIS12:
3712 	case CHIP_VEGAM:
3713 	case CHIP_VEGA10:
3714 	case CHIP_VEGA12:
3715 	case CHIP_VEGA20:
3716 		if (dce110_register_irq_handlers(dm->adev)) {
3717 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3718 			goto fail;
3719 		}
3720 		break;
3721 #if defined(CONFIG_DRM_AMD_DC_DCN)
3722 	case CHIP_RAVEN:
3723 	case CHIP_NAVI12:
3724 	case CHIP_NAVI10:
3725 	case CHIP_NAVI14:
3726 	case CHIP_RENOIR:
3727 	case CHIP_SIENNA_CICHLID:
3728 	case CHIP_NAVY_FLOUNDER:
3729 	case CHIP_DIMGREY_CAVEFISH:
3730 	case CHIP_VANGOGH:
3731 		if (dcn10_register_irq_handlers(dm->adev)) {
3732 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3733 			goto fail;
3734 		}
3735 		break;
3736 #endif
3737 	default:
3738 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3739 		goto fail;
3740 	}
3741 
3742 	return 0;
3743 fail:
3744 	kfree(aencoder);
3745 	kfree(aconnector);
3746 
3747 	return -EINVAL;
3748 }
3749 
3750 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3751 {
3752 	drm_mode_config_cleanup(dm->ddev);
3753 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3755 }
3756 
3757 /******************************************************************************
3758  * amdgpu_display_funcs functions
3759  *****************************************************************************/
3760 
3761 /*
3762  * dm_bandwidth_update - program display watermarks
3763  *
3764  * @adev: amdgpu_device pointer
3765  *
3766  * Calculate and program the display watermarks and line buffer allocation.
3767  */
3768 static void dm_bandwidth_update(struct amdgpu_device *adev)
3769 {
3770 	/* TODO: implement later */
3771 }
3772 
3773 static const struct amdgpu_display_funcs dm_display_funcs = {
3774 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3775 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3776 	.backlight_set_level = NULL, /* never called for DC */
3777 	.backlight_get_level = NULL, /* never called for DC */
3778 	.hpd_sense = NULL,/* called unconditionally */
3779 	.hpd_set_polarity = NULL, /* called unconditionally */
3780 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3781 	.page_flip_get_scanoutpos =
3782 		dm_crtc_get_scanoutpos,/* called unconditionally */
3783 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3784 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3785 };
3786 
3787 #if defined(CONFIG_DEBUG_KERNEL_DC)
3788 
3789 static ssize_t s3_debug_store(struct device *device,
3790 			      struct device_attribute *attr,
3791 			      const char *buf,
3792 			      size_t count)
3793 {
3794 	int ret;
3795 	int s3_state;
3796 	struct drm_device *drm_dev = dev_get_drvdata(device);
3797 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3798 
3799 	ret = kstrtoint(buf, 0, &s3_state);
3800 
3801 	if (ret == 0) {
3802 		if (s3_state) {
3803 			dm_resume(adev);
3804 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3805 		} else {
3806 			dm_suspend(adev);
		}
3807 	}
3808 
3809 	return ret == 0 ? count : 0;
3810 }
3811 
3812 DEVICE_ATTR_WO(s3_debug);
3813 
3814 #endif
3815 
3816 static int dm_early_init(void *handle)
3817 {
3818 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3819 
3820 	switch (adev->asic_type) {
3821 #if defined(CONFIG_DRM_AMD_DC_SI)
3822 	case CHIP_TAHITI:
3823 	case CHIP_PITCAIRN:
3824 	case CHIP_VERDE:
3825 		adev->mode_info.num_crtc = 6;
3826 		adev->mode_info.num_hpd = 6;
3827 		adev->mode_info.num_dig = 6;
3828 		break;
3829 	case CHIP_OLAND:
3830 		adev->mode_info.num_crtc = 2;
3831 		adev->mode_info.num_hpd = 2;
3832 		adev->mode_info.num_dig = 2;
3833 		break;
3834 #endif
3835 	case CHIP_BONAIRE:
3836 	case CHIP_HAWAII:
3837 		adev->mode_info.num_crtc = 6;
3838 		adev->mode_info.num_hpd = 6;
3839 		adev->mode_info.num_dig = 6;
3840 		break;
3841 	case CHIP_KAVERI:
3842 		adev->mode_info.num_crtc = 4;
3843 		adev->mode_info.num_hpd = 6;
3844 		adev->mode_info.num_dig = 7;
3845 		break;
3846 	case CHIP_KABINI:
3847 	case CHIP_MULLINS:
3848 		adev->mode_info.num_crtc = 2;
3849 		adev->mode_info.num_hpd = 6;
3850 		adev->mode_info.num_dig = 6;
3851 		break;
3852 	case CHIP_FIJI:
3853 	case CHIP_TONGA:
3854 		adev->mode_info.num_crtc = 6;
3855 		adev->mode_info.num_hpd = 6;
3856 		adev->mode_info.num_dig = 7;
3857 		break;
3858 	case CHIP_CARRIZO:
3859 		adev->mode_info.num_crtc = 3;
3860 		adev->mode_info.num_hpd = 6;
3861 		adev->mode_info.num_dig = 9;
3862 		break;
3863 	case CHIP_STONEY:
3864 		adev->mode_info.num_crtc = 2;
3865 		adev->mode_info.num_hpd = 6;
3866 		adev->mode_info.num_dig = 9;
3867 		break;
3868 	case CHIP_POLARIS11:
3869 	case CHIP_POLARIS12:
3870 		adev->mode_info.num_crtc = 5;
3871 		adev->mode_info.num_hpd = 5;
3872 		adev->mode_info.num_dig = 5;
3873 		break;
3874 	case CHIP_POLARIS10:
3875 	case CHIP_VEGAM:
3876 		adev->mode_info.num_crtc = 6;
3877 		adev->mode_info.num_hpd = 6;
3878 		adev->mode_info.num_dig = 6;
3879 		break;
3880 	case CHIP_VEGA10:
3881 	case CHIP_VEGA12:
3882 	case CHIP_VEGA20:
3883 		adev->mode_info.num_crtc = 6;
3884 		adev->mode_info.num_hpd = 6;
3885 		adev->mode_info.num_dig = 6;
3886 		break;
3887 #if defined(CONFIG_DRM_AMD_DC_DCN)
3888 	case CHIP_RAVEN:
3889 	case CHIP_RENOIR:
3890 	case CHIP_VANGOGH:
3891 		adev->mode_info.num_crtc = 4;
3892 		adev->mode_info.num_hpd = 4;
3893 		adev->mode_info.num_dig = 4;
3894 		break;
3895 	case CHIP_NAVI10:
3896 	case CHIP_NAVI12:
3897 	case CHIP_SIENNA_CICHLID:
3898 	case CHIP_NAVY_FLOUNDER:
3899 		adev->mode_info.num_crtc = 6;
3900 		adev->mode_info.num_hpd = 6;
3901 		adev->mode_info.num_dig = 6;
3902 		break;
3903 	case CHIP_NAVI14:
3904 	case CHIP_DIMGREY_CAVEFISH:
3905 		adev->mode_info.num_crtc = 5;
3906 		adev->mode_info.num_hpd = 5;
3907 		adev->mode_info.num_dig = 5;
3908 		break;
3909 #endif
3910 	default:
3911 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3912 		return -EINVAL;
3913 	}
3914 
3915 	amdgpu_dm_set_irq_funcs(adev);
3916 
3917 	if (adev->mode_info.funcs == NULL)
3918 		adev->mode_info.funcs = &dm_display_funcs;
3919 
3920 	/*
3921 	 * Note: Do NOT change adev->audio_endpt_rreg and
3922 	 * adev->audio_endpt_wreg because they are initialised in
3923 	 * amdgpu_device_init()
3924 	 */
3925 #if defined(CONFIG_DEBUG_KERNEL_DC)
3926 	device_create_file(
3927 		adev_to_drm(adev)->dev,
3928 		&dev_attr_s3_debug);
3929 #endif
3930 
3931 	return 0;
3932 }
3933 
3934 static bool modeset_required(struct drm_crtc_state *crtc_state,
3935 			     struct dc_stream_state *new_stream,
3936 			     struct dc_stream_state *old_stream)
3937 {
3938 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3939 }
3940 
3941 static bool modereset_required(struct drm_crtc_state *crtc_state)
3942 {
3943 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3944 }
3945 
3946 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3947 {
3948 	drm_encoder_cleanup(encoder);
3949 	kfree(encoder);
3950 }
3951 
3952 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3953 	.destroy = amdgpu_dm_encoder_destroy,
3954 };
3955 
3956 
3957 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3958 					 struct drm_framebuffer *fb,
3959 					 int *min_downscale, int *max_upscale)
3960 {
3961 	struct amdgpu_device *adev = drm_to_adev(dev);
3962 	struct dc *dc = adev->dm.dc;
3963 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3964 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3965 
3966 	switch (fb->format->format) {
3967 	case DRM_FORMAT_P010:
3968 	case DRM_FORMAT_NV12:
3969 	case DRM_FORMAT_NV21:
3970 		*max_upscale = plane_cap->max_upscale_factor.nv12;
3971 		*min_downscale = plane_cap->max_downscale_factor.nv12;
3972 		break;
3973 
3974 	case DRM_FORMAT_XRGB16161616F:
3975 	case DRM_FORMAT_ARGB16161616F:
3976 	case DRM_FORMAT_XBGR16161616F:
3977 	case DRM_FORMAT_ABGR16161616F:
3978 		*max_upscale = plane_cap->max_upscale_factor.fp16;
3979 		*min_downscale = plane_cap->max_downscale_factor.fp16;
3980 		break;
3981 
3982 	default:
3983 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
3984 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
3985 		break;
3986 	}
3987 
3988 	/*
3989 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
3990 	 * scaling factor of 1.0 == 1000 units.
3991 	 */
3992 	if (*max_upscale == 1)
3993 		*max_upscale = 1000;
3994 
3995 	if (*min_downscale == 1)
3996 		*min_downscale = 1000;
3997 }
3998 
3999 
4000 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4001 				struct dc_scaling_info *scaling_info)
4002 {
4003 	int scale_w, scale_h, min_downscale, max_upscale;
4004 
4005 	memset(scaling_info, 0, sizeof(*scaling_info));
4006 
4007 	/* Source is fixed-point 16.16, but we ignore the fractional part for now... */
4008 	scaling_info->src_rect.x = state->src_x >> 16;
4009 	scaling_info->src_rect.y = state->src_y >> 16;
4010 
4011 	scaling_info->src_rect.width = state->src_w >> 16;
4012 	if (scaling_info->src_rect.width == 0)
4013 		return -EINVAL;
4014 
4015 	scaling_info->src_rect.height = state->src_h >> 16;
4016 	if (scaling_info->src_rect.height == 0)
4017 		return -EINVAL;
4018 
4019 	scaling_info->dst_rect.x = state->crtc_x;
4020 	scaling_info->dst_rect.y = state->crtc_y;
4021 
4022 	if (state->crtc_w == 0)
4023 		return -EINVAL;
4024 
4025 	scaling_info->dst_rect.width = state->crtc_w;
4026 
4027 	if (state->crtc_h == 0)
4028 		return -EINVAL;
4029 
4030 	scaling_info->dst_rect.height = state->crtc_h;
4031 
4032 	/* DRM doesn't specify clipping on destination output. */
4033 	scaling_info->clip_rect = scaling_info->dst_rect;
4034 
4035 	/* Validate scaling per-format with DC plane caps */
4036 	if (state->plane && state->plane->dev && state->fb) {
4037 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4038 					     &min_downscale, &max_upscale);
4039 	} else {
4040 		min_downscale = 250;
4041 		max_upscale = 16000;
4042 	}
4043 
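	/*
	 * Scaling factors are in thousandths: 1000 == 1.0. E.g. a 1920
	 * pixel wide source on a 960 pixel wide destination yields
	 * scale_w == 500 (0.5x), which is only accepted if
	 * min_downscale <= 500.
	 */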
4044 	scale_w = scaling_info->dst_rect.width * 1000 /
4045 		  scaling_info->src_rect.width;
4046 
4047 	if (scale_w < min_downscale || scale_w > max_upscale)
4048 		return -EINVAL;
4049 
4050 	scale_h = scaling_info->dst_rect.height * 1000 /
4051 		  scaling_info->src_rect.height;
4052 
4053 	if (scale_h < min_downscale || scale_h > max_upscale)
4054 		return -EINVAL;
4055 
4056 	/*
4057 	 * The "scaling_quality" can be ignored for now; quality == 0 makes
4058 	 * DC assume reasonable defaults based on the format.
4059 	 */
4060 
4061 	return 0;
4062 }
4063 
4064 static void
4065 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4066 				 uint64_t tiling_flags)
4067 {
4068 	/* Fill GFX8 params */
4069 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4070 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4071 
4072 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4073 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4074 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4075 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4076 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4077 
4078 		/* XXX fix me for VI */
4079 		tiling_info->gfx8.num_banks = num_banks;
4080 		tiling_info->gfx8.array_mode =
4081 				DC_ARRAY_2D_TILED_THIN1;
4082 		tiling_info->gfx8.tile_split = tile_split;
4083 		tiling_info->gfx8.bank_width = bankw;
4084 		tiling_info->gfx8.bank_height = bankh;
4085 		tiling_info->gfx8.tile_aspect = mtaspect;
4086 		tiling_info->gfx8.tile_mode =
4087 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4088 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4089 			== DC_ARRAY_1D_TILED_THIN1) {
4090 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4091 	}
4092 
4093 	tiling_info->gfx8.pipe_config =
4094 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4095 }
4096 
4097 static void
4098 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4099 				  union dc_tiling_info *tiling_info)
4100 {
4101 	tiling_info->gfx9.num_pipes =
4102 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4103 	tiling_info->gfx9.num_banks =
4104 		adev->gfx.config.gb_addr_config_fields.num_banks;
4105 	tiling_info->gfx9.pipe_interleave =
4106 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4107 	tiling_info->gfx9.num_shader_engines =
4108 		adev->gfx.config.gb_addr_config_fields.num_se;
4109 	tiling_info->gfx9.max_compressed_frags =
4110 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4111 	tiling_info->gfx9.num_rb_per_se =
4112 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4113 	tiling_info->gfx9.shaderEnable = 1;
4114 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4115 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4116 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4117 	    adev->asic_type == CHIP_VANGOGH)
4118 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4119 }
4120 
4121 static int
4122 validate_dcc(struct amdgpu_device *adev,
4123 	     const enum surface_pixel_format format,
4124 	     const enum dc_rotation_angle rotation,
4125 	     const union dc_tiling_info *tiling_info,
4126 	     const struct dc_plane_dcc_param *dcc,
4127 	     const struct dc_plane_address *address,
4128 	     const struct plane_size *plane_size)
4129 {
4130 	struct dc *dc = adev->dm.dc;
4131 	struct dc_dcc_surface_param input;
4132 	struct dc_surface_dcc_cap output;
4133 
4134 	memset(&input, 0, sizeof(input));
4135 	memset(&output, 0, sizeof(output));
4136 
4137 	if (!dcc->enable)
4138 		return 0;
4139 
4140 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4141 	    !dc->cap_funcs.get_dcc_compression_cap)
4142 		return -EINVAL;
4143 
4144 	input.format = format;
4145 	input.surface_size.width = plane_size->surface_size.width;
4146 	input.surface_size.height = plane_size->surface_size.height;
4147 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4148 
4149 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4150 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4151 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4152 		input.scan = SCAN_DIRECTION_VERTICAL;
4153 
4154 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4155 		return -EINVAL;
4156 
4157 	if (!output.capable)
4158 		return -EINVAL;
4159 
4160 	if (dcc->independent_64b_blks == 0 &&
4161 	    output.grph.rgb.independent_64b_blks != 0)
4162 		return -EINVAL;
4163 
4164 	return 0;
4165 }
4166 
4167 static bool
4168 modifier_has_dcc(uint64_t modifier)
4169 {
4170 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4171 }
4172 
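/*
 * AMD format modifiers are 64-bit values with packed bitfields;
 * AMD_FMT_MOD_GET() extracts a single field. The TILE field holds the
 * GFX9+ swizzle mode, which is why LINEAR (not an AMD modifier) is
 * special-cased to swizzle mode 0 below.
 */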
4173 static unsigned
4174 modifier_gfx9_swizzle_mode(uint64_t modifier)
4175 {
4176 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4177 		return 0;
4178 
4179 	return AMD_FMT_MOD_GET(TILE, modifier);
4180 }
4181 
4182 static const struct drm_format_info *
4183 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4184 {
4185 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4186 }
4187 
4188 static void
4189 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4190 				    union dc_tiling_info *tiling_info,
4191 				    uint64_t modifier)
4192 {
4193 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4194 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4195 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4196 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4197 
4198 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4199 
4200 	if (!IS_AMD_FMT_MOD(modifier))
4201 		return;
4202 
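	/*
	 * PIPE_XOR_BITS covers pipes and shader engines combined:
	 * attribute at most 4 bits (16 pipes) to num_pipes and the
	 * remainder to num_shader_engines.
	 */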
4203 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4204 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4205 
4206 	if (adev->family >= AMDGPU_FAMILY_NV) {
4207 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4208 	} else {
4209 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4210 
4211 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4212 	}
4213 }
4214 
4215 enum dm_micro_swizzle {
4216 	MICRO_SWIZZLE_Z = 0,
4217 	MICRO_SWIZZLE_S = 1,
4218 	MICRO_SWIZZLE_D = 2,
4219 	MICRO_SWIZZLE_R = 3
4220 };
4221 
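/*
 * GFX9+ swizzle modes encode the micro-tile class in their low two
 * bits; dm_plane_format_mod_supported() below relies on this by
 * masking the swizzle mode with 3.
 */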
4222 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4223 					  uint32_t format,
4224 					  uint64_t modifier)
4225 {
4226 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4227 	const struct drm_format_info *info = drm_format_info(format);
4228 
4229 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4230 
4231 	if (!info)
4232 		return false;
4233 
4234 	/*
4235 	 * We always have to allow this modifier, because core DRM still
4236 	 * checks LINEAR support if userspace does not provide modifiers.
4237 	 */
4238 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4239 		return true;
4240 
4241 	/*
4242 	 * For D swizzle the canonical modifier depends on the bpp, so check
4243 	 * it here.
4244 	 */
4245 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4246 	    adev->family >= AMDGPU_FAMILY_NV) {
4247 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4248 			return false;
4249 	}
4250 
4251 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4252 	    info->cpp[0] < 8)
4253 		return false;
4254 
4255 	if (modifier_has_dcc(modifier)) {
4256 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4257 		if (info->cpp[0] != 4)
4258 			return false;
4259 		/* We support multi-planar formats, but not when combined with
4260 		 * additional DCC metadata planes. */
4261 		if (info->num_planes > 1)
4262 			return false;
4263 	}
4264 
4265 	return true;
4266 }
4267 
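/*
 * Append one modifier to a kmalloc'ed array, doubling the capacity
 * when full. On allocation failure the array is freed and *mods set
 * to NULL, which later calls treat as a no-op and which
 * get_plane_modifiers() reports as -ENOMEM.
 */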
4268 static void
4269 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4270 {
4271 	if (!*mods)
4272 		return;
4273 
4274 	if (*cap - *size < 1) {
4275 		uint64_t new_cap = *cap * 2;
4276 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4277 
4278 		if (!new_mods) {
4279 			kfree(*mods);
4280 			*mods = NULL;
4281 			return;
4282 		}
4283 
4284 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4285 		kfree(*mods);
4286 		*mods = new_mods;
4287 		*cap = new_cap;
4288 	}
4289 
4290 	(*mods)[*size] = mod;
4291 	*size += 1;
4292 }
4293 
4294 static void
4295 add_gfx9_modifiers(const struct amdgpu_device *adev,
4296 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4297 {
4298 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4299 	int pipe_xor_bits = min(8, pipes +
4300 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4301 	int bank_xor_bits = min(8 - pipe_xor_bits,
4302 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4303 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4304 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4305 
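	/*
	 * Per the computations above, at most 8 address bits are XORed
	 * in total: pipe bits take priority and the banks get whatever
	 * remains.
	 */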
4307 	if (adev->family == AMDGPU_FAMILY_RV) {
4308 		/* Raven2 and later */
4309 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4310 
4311 		/*
4312 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4313 		 * doesn't support _D on DCN
4314 		 */
4315 
4316 		if (has_constant_encode) {
4317 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4318 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4319 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4320 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4321 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4322 				    AMD_FMT_MOD_SET(DCC, 1) |
4323 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4324 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4325 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4326 		}
4327 
4328 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4329 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4330 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4331 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4332 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4333 			    AMD_FMT_MOD_SET(DCC, 1) |
4334 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4335 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4336 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4337 
4338 		if (has_constant_encode) {
4339 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4340 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4341 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4342 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4343 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4344 				    AMD_FMT_MOD_SET(DCC, 1) |
4345 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4346 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4347 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4349 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4350 				    AMD_FMT_MOD_SET(RB, rb) |
4351 				    AMD_FMT_MOD_SET(PIPE, pipes));
4352 		}
4353 
4354 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4355 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4356 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4357 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4358 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4359 			    AMD_FMT_MOD_SET(DCC, 1) |
4360 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4361 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4362 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4363 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4364 			    AMD_FMT_MOD_SET(RB, rb) |
4365 			    AMD_FMT_MOD_SET(PIPE, pipes));
4366 	}
4367 
4368 	/*
4369 	 * Only supported for 64bpp on Raven, will be filtered on format in
4370 	 * dm_plane_format_mod_supported.
4371 	 */
4372 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4373 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4374 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4375 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4376 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4377 
4378 	if (adev->family == AMDGPU_FAMILY_RV) {
4379 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4380 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4381 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4382 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4383 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4384 	}
4385 
4386 	/*
4387 	 * Only supported for 64bpp on Raven, will be filtered on format in
4388 	 * dm_plane_format_mod_supported.
4389 	 */
4390 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4391 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4392 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4393 
4394 	if (adev->family == AMDGPU_FAMILY_RV) {
4395 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4396 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4397 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4398 	}
4399 }
4400 
4401 static void
4402 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4403 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4404 {
4405 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4406 
4407 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4408 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4409 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4410 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4411 		    AMD_FMT_MOD_SET(DCC, 1) |
4412 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4413 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4414 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4415 
4416 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4417 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4418 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4419 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4420 		    AMD_FMT_MOD_SET(DCC, 1) |
4421 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4422 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4423 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4424 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4425 
4426 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4427 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4428 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4429 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4430 
4431 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4432 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4433 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4434 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4435 
4437 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4438 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4439 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4440 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4441 
4442 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4443 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4444 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4445 }
4446 
4447 static void
4448 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4449 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4450 {
4451 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4452 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4453 
4454 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4455 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4456 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4457 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4458 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4459 		    AMD_FMT_MOD_SET(DCC, 1) |
4460 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4461 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4462 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4463 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4464 
4465 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4466 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4467 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4468 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4469 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4470 		    AMD_FMT_MOD_SET(DCC, 1) |
4471 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4472 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4473 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4474 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4475 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4476 
4477 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4478 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4479 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4480 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4481 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4482 
4483 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4484 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4485 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4486 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4487 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4488 
4489 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4490 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4491 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4492 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4493 
4494 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4495 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4496 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4497 }
4498 
4499 static int
4500 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4501 {
4502 	uint64_t size = 0, capacity = 128;
4503 	*mods = NULL;
4504 
4505 	/* We have not hooked up any pre-GFX9 modifiers. */
4506 	if (adev->family < AMDGPU_FAMILY_AI)
4507 		return 0;
4508 
4509 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4510 
4511 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4512 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4513 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4514 		return *mods ? 0 : -ENOMEM;
4515 	}
4516 
4517 	switch (adev->family) {
4518 	case AMDGPU_FAMILY_AI:
4519 	case AMDGPU_FAMILY_RV:
4520 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4521 		break;
4522 	case AMDGPU_FAMILY_NV:
4523 	case AMDGPU_FAMILY_VGH:
4524 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4525 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4526 		else
4527 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4528 		break;
4529 	}
4530 
4531 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4532 
4533 	/* INVALID marks the end of the list. */
4534 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4535 
4536 	if (!*mods)
4537 		return -ENOMEM;
4538 
4539 	return 0;
4540 }
4541 
4542 static int
4543 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4544 					  const struct amdgpu_framebuffer *afb,
4545 					  const enum surface_pixel_format format,
4546 					  const enum dc_rotation_angle rotation,
4547 					  const struct plane_size *plane_size,
4548 					  union dc_tiling_info *tiling_info,
4549 					  struct dc_plane_dcc_param *dcc,
4550 					  struct dc_plane_address *address,
4551 					  const bool force_disable_dcc)
4552 {
4553 	const uint64_t modifier = afb->base.modifier;
4554 	int ret;
4555 
4556 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4557 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4558 
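	/*
	 * With a DCC modifier the compression metadata is carried as a
	 * second framebuffer plane: offsets[1]/pitches[1] locate it
	 * relative to the buffer base address.
	 */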
4559 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4560 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4561 
4562 		dcc->enable = 1;
4563 		dcc->meta_pitch = afb->base.pitches[1];
4564 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4565 
4566 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4567 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4568 	}
4569 
4570 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4571 	if (ret)
4572 		return ret;
4573 
4574 	return 0;
4575 }
4576 
4577 static int
4578 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4579 			     const struct amdgpu_framebuffer *afb,
4580 			     const enum surface_pixel_format format,
4581 			     const enum dc_rotation_angle rotation,
4582 			     const uint64_t tiling_flags,
4583 			     union dc_tiling_info *tiling_info,
4584 			     struct plane_size *plane_size,
4585 			     struct dc_plane_dcc_param *dcc,
4586 			     struct dc_plane_address *address,
4587 			     bool tmz_surface,
4588 			     bool force_disable_dcc)
4589 {
4590 	const struct drm_framebuffer *fb = &afb->base;
4591 	int ret;
4592 
4593 	memset(tiling_info, 0, sizeof(*tiling_info));
4594 	memset(plane_size, 0, sizeof(*plane_size));
4595 	memset(dcc, 0, sizeof(*dcc));
4596 	memset(address, 0, sizeof(*address));
4597 
4598 	address->tmz_surface = tmz_surface;
4599 
4600 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4601 		uint64_t addr = afb->address + fb->offsets[0];
4602 
4603 		plane_size->surface_size.x = 0;
4604 		plane_size->surface_size.y = 0;
4605 		plane_size->surface_size.width = fb->width;
4606 		plane_size->surface_size.height = fb->height;
4607 		plane_size->surface_pitch =
4608 			fb->pitches[0] / fb->format->cpp[0];
4609 
4610 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4611 		address->grph.addr.low_part = lower_32_bits(addr);
4612 		address->grph.addr.high_part = upper_32_bits(addr);
4613 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4614 		uint64_t luma_addr = afb->address + fb->offsets[0];
4615 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4616 
4617 		plane_size->surface_size.x = 0;
4618 		plane_size->surface_size.y = 0;
4619 		plane_size->surface_size.width = fb->width;
4620 		plane_size->surface_size.height = fb->height;
4621 		plane_size->surface_pitch =
4622 			fb->pitches[0] / fb->format->cpp[0];
4623 
4624 		plane_size->chroma_size.x = 0;
4625 		plane_size->chroma_size.y = 0;
4626 		/* TODO: set these based on surface format */
4627 		plane_size->chroma_size.width = fb->width / 2;
4628 		plane_size->chroma_size.height = fb->height / 2;
4629 
4630 		plane_size->chroma_pitch =
4631 			fb->pitches[1] / fb->format->cpp[1];
4632 
4633 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4634 		address->video_progressive.luma_addr.low_part =
4635 			lower_32_bits(luma_addr);
4636 		address->video_progressive.luma_addr.high_part =
4637 			upper_32_bits(luma_addr);
4638 		address->video_progressive.chroma_addr.low_part =
4639 			lower_32_bits(chroma_addr);
4640 		address->video_progressive.chroma_addr.high_part =
4641 			upper_32_bits(chroma_addr);
4642 	}
4643 
4644 	if (adev->family >= AMDGPU_FAMILY_AI) {
4645 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4646 								rotation, plane_size,
4647 								tiling_info, dcc,
4648 								address,
4649 								force_disable_dcc);
4650 		if (ret)
4651 			return ret;
4652 	} else {
4653 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4654 	}
4655 
4656 	return 0;
4657 }
4658 
4659 static void
4660 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4661 			       bool *per_pixel_alpha, bool *global_alpha,
4662 			       int *global_alpha_value)
4663 {
4664 	*per_pixel_alpha = false;
4665 	*global_alpha = false;
4666 	*global_alpha_value = 0xff;
4667 
4668 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4669 		return;
4670 
4671 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4672 		static const uint32_t alpha_formats[] = {
4673 			DRM_FORMAT_ARGB8888,
4674 			DRM_FORMAT_RGBA8888,
4675 			DRM_FORMAT_ABGR8888,
4676 		};
4677 		uint32_t format = plane_state->fb->format->format;
4678 		unsigned int i;
4679 
4680 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4681 			if (format == alpha_formats[i]) {
4682 				*per_pixel_alpha = true;
4683 				break;
4684 			}
4685 		}
4686 	}
4687 
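	/*
	 * DRM plane alpha is 16-bit (0xffff == fully opaque) while DC
	 * takes an 8-bit global alpha, so drop the low byte, e.g.
	 * 0x8080 -> 0x80.
	 */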
4688 	if (plane_state->alpha < 0xffff) {
4689 		*global_alpha = true;
4690 		*global_alpha_value = plane_state->alpha >> 8;
4691 	}
4692 }
4693 
4694 static int
4695 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4696 			    const enum surface_pixel_format format,
4697 			    enum dc_color_space *color_space)
4698 {
4699 	bool full_range;
4700 
4701 	*color_space = COLOR_SPACE_SRGB;
4702 
4703 	/* DRM color properties only affect non-RGB formats. */
4704 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4705 		return 0;
4706 
4707 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4708 
4709 	switch (plane_state->color_encoding) {
4710 	case DRM_COLOR_YCBCR_BT601:
4711 		if (full_range)
4712 			*color_space = COLOR_SPACE_YCBCR601;
4713 		else
4714 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4715 		break;
4716 
4717 	case DRM_COLOR_YCBCR_BT709:
4718 		if (full_range)
4719 			*color_space = COLOR_SPACE_YCBCR709;
4720 		else
4721 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4722 		break;
4723 
4724 	case DRM_COLOR_YCBCR_BT2020:
4725 		if (full_range)
4726 			*color_space = COLOR_SPACE_2020_YCBCR;
4727 		else
4728 			return -EINVAL;
4729 		break;
4730 
4731 	default:
4732 		return -EINVAL;
4733 	}
4734 
4735 	return 0;
4736 }
4737 
4738 static int
4739 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4740 			    const struct drm_plane_state *plane_state,
4741 			    const uint64_t tiling_flags,
4742 			    struct dc_plane_info *plane_info,
4743 			    struct dc_plane_address *address,
4744 			    bool tmz_surface,
4745 			    bool force_disable_dcc)
4746 {
4747 	const struct drm_framebuffer *fb = plane_state->fb;
4748 	const struct amdgpu_framebuffer *afb =
4749 		to_amdgpu_framebuffer(plane_state->fb);
4750 	int ret;
4751 
4752 	memset(plane_info, 0, sizeof(*plane_info));
4753 
4754 	switch (fb->format->format) {
4755 	case DRM_FORMAT_C8:
4756 		plane_info->format =
4757 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4758 		break;
4759 	case DRM_FORMAT_RGB565:
4760 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4761 		break;
4762 	case DRM_FORMAT_XRGB8888:
4763 	case DRM_FORMAT_ARGB8888:
4764 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4765 		break;
4766 	case DRM_FORMAT_XRGB2101010:
4767 	case DRM_FORMAT_ARGB2101010:
4768 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4769 		break;
4770 	case DRM_FORMAT_XBGR2101010:
4771 	case DRM_FORMAT_ABGR2101010:
4772 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4773 		break;
4774 	case DRM_FORMAT_XBGR8888:
4775 	case DRM_FORMAT_ABGR8888:
4776 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4777 		break;
4778 	case DRM_FORMAT_NV21:
4779 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4780 		break;
4781 	case DRM_FORMAT_NV12:
4782 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4783 		break;
4784 	case DRM_FORMAT_P010:
4785 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4786 		break;
4787 	case DRM_FORMAT_XRGB16161616F:
4788 	case DRM_FORMAT_ARGB16161616F:
4789 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4790 		break;
4791 	case DRM_FORMAT_XBGR16161616F:
4792 	case DRM_FORMAT_ABGR16161616F:
4793 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4794 		break;
4795 	default:
4796 		DRM_ERROR(
4797 			"Unsupported screen format %p4cc\n",
4798 			&fb->format->format);
4799 		return -EINVAL;
4800 	}
4801 
4802 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4803 	case DRM_MODE_ROTATE_0:
4804 		plane_info->rotation = ROTATION_ANGLE_0;
4805 		break;
4806 	case DRM_MODE_ROTATE_90:
4807 		plane_info->rotation = ROTATION_ANGLE_90;
4808 		break;
4809 	case DRM_MODE_ROTATE_180:
4810 		plane_info->rotation = ROTATION_ANGLE_180;
4811 		break;
4812 	case DRM_MODE_ROTATE_270:
4813 		plane_info->rotation = ROTATION_ANGLE_270;
4814 		break;
4815 	default:
4816 		plane_info->rotation = ROTATION_ANGLE_0;
4817 		break;
4818 	}
4819 
4820 	plane_info->visible = true;
4821 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4822 
4823 	plane_info->layer_index = 0;
4824 
4825 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4826 					  &plane_info->color_space);
4827 	if (ret)
4828 		return ret;
4829 
4830 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4831 					   plane_info->rotation, tiling_flags,
4832 					   &plane_info->tiling_info,
4833 					   &plane_info->plane_size,
4834 					   &plane_info->dcc, address, tmz_surface,
4835 					   force_disable_dcc);
4836 	if (ret)
4837 		return ret;
4838 
4839 	fill_blending_from_plane_state(
4840 		plane_state, &plane_info->per_pixel_alpha,
4841 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4842 
4843 	return 0;
4844 }
4845 
4846 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4847 				    struct dc_plane_state *dc_plane_state,
4848 				    struct drm_plane_state *plane_state,
4849 				    struct drm_crtc_state *crtc_state)
4850 {
4851 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4852 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4853 	struct dc_scaling_info scaling_info;
4854 	struct dc_plane_info plane_info;
4855 	int ret;
4856 	bool force_disable_dcc = false;
4857 
4858 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4859 	if (ret)
4860 		return ret;
4861 
4862 	dc_plane_state->src_rect = scaling_info.src_rect;
4863 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4864 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4865 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4866 
4867 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4868 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4869 					  afb->tiling_flags,
4870 					  &plane_info,
4871 					  &dc_plane_state->address,
4872 					  afb->tmz_surface,
4873 					  force_disable_dcc);
4874 	if (ret)
4875 		return ret;
4876 
4877 	dc_plane_state->format = plane_info.format;
4878 	dc_plane_state->color_space = plane_info.color_space;
4880 	dc_plane_state->plane_size = plane_info.plane_size;
4881 	dc_plane_state->rotation = plane_info.rotation;
4882 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4883 	dc_plane_state->stereo_format = plane_info.stereo_format;
4884 	dc_plane_state->tiling_info = plane_info.tiling_info;
4885 	dc_plane_state->visible = plane_info.visible;
4886 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4887 	dc_plane_state->global_alpha = plane_info.global_alpha;
4888 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4889 	dc_plane_state->dcc = plane_info.dcc;
4890 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4891 	dc_plane_state->flip_int_enabled = true;
4892 
4893 	/*
4894 	 * Always set input transfer function, since plane state is refreshed
4895 	 * every time.
4896 	 */
4897 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4898 	if (ret)
4899 		return ret;
4900 
4901 	return 0;
4902 }
4903 
4904 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4905 					   const struct dm_connector_state *dm_state,
4906 					   struct dc_stream_state *stream)
4907 {
4908 	enum amdgpu_rmx_type rmx_type;
4909 
4910 	struct rect src = { 0 }; /* viewport in composition space */
4911 	struct rect dst = { 0 }; /* stream addressable area */
4912 
4913 	/* No mode; nothing to be done. */
4914 	if (!mode)
4915 		return;
4916 
4917 	/* Full screen scaling by default */
4918 	src.width = mode->hdisplay;
4919 	src.height = mode->vdisplay;
4920 	dst.width = stream->timing.h_addressable;
4921 	dst.height = stream->timing.v_addressable;
4922 
4923 	if (dm_state) {
4924 		rmx_type = dm_state->scaling;
4925 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4926 			if (src.width * dst.height <
4927 					src.height * dst.width) {
4928 				/* height needs less upscaling/more downscaling */
4929 				dst.width = src.width *
4930 						dst.height / src.height;
4931 			} else {
4932 				/* width needs less upscaling/more downscaling */
4933 				dst.height = src.height *
4934 						dst.width / src.width;
4935 			}
4936 		} else if (rmx_type == RMX_CENTER) {
4937 			dst = src;
4938 		}
4939 
4940 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4941 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4942 
4943 		if (dm_state->underscan_enable) {
4944 			dst.x += dm_state->underscan_hborder / 2;
4945 			dst.y += dm_state->underscan_vborder / 2;
4946 			dst.width -= dm_state->underscan_hborder;
4947 			dst.height -= dm_state->underscan_vborder;
4948 		}
4949 	}
4950 
4951 	stream->src = src;
4952 	stream->dst = dst;
4953 
4954 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4955 		      dst.x, dst.y, dst.width, dst.height);
4957 }
4958 
4959 static enum dc_color_depth
4960 convert_color_depth_from_display_info(const struct drm_connector *connector,
4961 				      bool is_y420, int requested_bpc)
4962 {
4963 	uint8_t bpc;
4964 
4965 	if (is_y420) {
4966 		bpc = 8;
4967 
4968 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4969 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4970 			bpc = 16;
4971 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4972 			bpc = 12;
4973 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4974 			bpc = 10;
4975 	} else {
4976 		bpc = (uint8_t)connector->display_info.bpc;
4977 		/* Assume 8 bpc by default if no bpc is specified. */
4978 		bpc = bpc ? bpc : 8;
4979 	}
4980 
4981 	if (requested_bpc > 0) {
4982 		/*
4983 		 * Cap display bpc based on the user requested value.
4984 		 *
4985 		 * The value for state->max_bpc may not be correctly updated
4986 		 * depending on when the connector gets added to the state
4987 		 * or if this was called outside of atomic check, so it
4988 		 * can't be used directly.
4989 		 */
4990 		bpc = min_t(u8, bpc, requested_bpc);
4991 
4992 		/* Round down to the nearest even number. */
4993 		bpc = bpc - (bpc & 1);
4994 	}
4995 
4996 	switch (bpc) {
4997 	case 0:
4998 		/*
4999 		 * Temporary workaround: DRM doesn't parse color depth for
5000 		 * EDID revisions before 1.4.
5001 		 * TODO: Fix EDID parsing.
5002 		 */
5003 		return COLOR_DEPTH_888;
5004 	case 6:
5005 		return COLOR_DEPTH_666;
5006 	case 8:
5007 		return COLOR_DEPTH_888;
5008 	case 10:
5009 		return COLOR_DEPTH_101010;
5010 	case 12:
5011 		return COLOR_DEPTH_121212;
5012 	case 14:
5013 		return COLOR_DEPTH_141414;
5014 	case 16:
5015 		return COLOR_DEPTH_161616;
5016 	default:
5017 		return COLOR_DEPTH_UNDEFINED;
5018 	}
5019 }
5020 
5021 static enum dc_aspect_ratio
5022 get_aspect_ratio(const struct drm_display_mode *mode_in)
5023 {
5024 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5025 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5026 }
5027 
5028 static enum dc_color_space
5029 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5030 {
5031 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5032 
5033 	switch (dc_crtc_timing->pixel_encoding)	{
5034 	case PIXEL_ENCODING_YCBCR422:
5035 	case PIXEL_ENCODING_YCBCR444:
5036 	case PIXEL_ENCODING_YCBCR420:
5037 	{
5038 		/*
5039 		 * 27030 kHz (27.03 MHz) is the separation point between HDTV
5040 		 * and SDTV according to the HDMI spec; use YCbCr709 above it
5041 		 * and YCbCr601 below it.
5042 		 */
5043 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5044 			if (dc_crtc_timing->flags.Y_ONLY)
5045 				color_space =
5046 					COLOR_SPACE_YCBCR709_LIMITED;
5047 			else
5048 				color_space = COLOR_SPACE_YCBCR709;
5049 		} else {
5050 			if (dc_crtc_timing->flags.Y_ONLY)
5051 				color_space =
5052 					COLOR_SPACE_YCBCR601_LIMITED;
5053 			else
5054 				color_space = COLOR_SPACE_YCBCR601;
5055 		}
5056 
5057 	}
5058 	break;
5059 	case PIXEL_ENCODING_RGB:
5060 		color_space = COLOR_SPACE_SRGB;
5061 		break;
5062 
5063 	default:
5064 		WARN_ON(1);
5065 		break;
5066 	}
5067 
5068 	return color_space;
5069 }
5070 
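/*
 * Walk down from the requested colour depth until the required TMDS
 * clock fits the sink's limit. E.g. 3840x2160@60 RGB is 594000 kHz at
 * 8 bpc; at 10 bpc that becomes 594000 * 30 / 24 == 742500 kHz, which
 * exceeds the 600000 kHz HDMI 2.0 maximum, so the depth falls back to
 * COLOR_DEPTH_888.
 */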
5071 static bool adjust_colour_depth_from_display_info(
5072 	struct dc_crtc_timing *timing_out,
5073 	const struct drm_display_info *info)
5074 {
5075 	enum dc_color_depth depth = timing_out->display_color_depth;
5076 	int normalized_clk;
5077 	do {
5078 		normalized_clk = timing_out->pix_clk_100hz / 10;
5079 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5080 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5081 			normalized_clk /= 2;
5082 		/* Adjust the pixel clock per the HDMI spec for the colour depth. */
5083 		switch (depth) {
5084 		case COLOR_DEPTH_888:
5085 			break;
5086 		case COLOR_DEPTH_101010:
5087 			normalized_clk = (normalized_clk * 30) / 24;
5088 			break;
5089 		case COLOR_DEPTH_121212:
5090 			normalized_clk = (normalized_clk * 36) / 24;
5091 			break;
5092 		case COLOR_DEPTH_161616:
5093 			normalized_clk = (normalized_clk * 48) / 24;
5094 			break;
5095 		default:
5096 			/* The above depths are the only ones valid for HDMI. */
5097 			return false;
5098 		}
5099 		if (normalized_clk <= info->max_tmds_clock) {
5100 			timing_out->display_color_depth = depth;
5101 			return true;
5102 		}
5103 	} while (--depth > COLOR_DEPTH_666);
5104 	return false;
5105 }
5106 
5107 static void fill_stream_properties_from_drm_display_mode(
5108 	struct dc_stream_state *stream,
5109 	const struct drm_display_mode *mode_in,
5110 	const struct drm_connector *connector,
5111 	const struct drm_connector_state *connector_state,
5112 	const struct dc_stream_state *old_stream,
5113 	int requested_bpc)
5114 {
5115 	struct dc_crtc_timing *timing_out = &stream->timing;
5116 	const struct drm_display_info *info = &connector->display_info;
5117 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5118 	struct hdmi_vendor_infoframe hv_frame;
5119 	struct hdmi_avi_infoframe avi_frame;
5120 
5121 	memset(&hv_frame, 0, sizeof(hv_frame));
5122 	memset(&avi_frame, 0, sizeof(avi_frame));
5123 
5124 	timing_out->h_border_left = 0;
5125 	timing_out->h_border_right = 0;
5126 	timing_out->v_border_top = 0;
5127 	timing_out->v_border_bottom = 0;
5128 	/* TODO: un-hardcode */
5129 	if (drm_mode_is_420_only(info, mode_in)
5130 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5131 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5132 	else if (drm_mode_is_420_also(info, mode_in)
5133 			&& aconnector->force_yuv420_output)
5134 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5135 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5136 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5137 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5138 	else
5139 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5140 
5141 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5142 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5143 		connector,
5144 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5145 		requested_bpc);
5146 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5147 	timing_out->hdmi_vic = 0;
5148 
5149 	if (old_stream) {
5150 		timing_out->vic = old_stream->timing.vic;
5151 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5152 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5153 	} else {
5154 		timing_out->vic = drm_match_cea_mode(mode_in);
5155 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5156 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5157 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5158 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5159 	}
5160 
5161 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5162 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5163 		timing_out->vic = avi_frame.video_code;
5164 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5165 		timing_out->hdmi_vic = hv_frame.vic;
5166 	}
5167 
5168 	if (is_freesync_video_mode(mode_in, aconnector)) {
5169 		timing_out->h_addressable = mode_in->hdisplay;
5170 		timing_out->h_total = mode_in->htotal;
5171 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5172 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5173 		timing_out->v_total = mode_in->vtotal;
5174 		timing_out->v_addressable = mode_in->vdisplay;
5175 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5176 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5177 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5178 	} else {
5179 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5180 		timing_out->h_total = mode_in->crtc_htotal;
5181 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5182 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5183 		timing_out->v_total = mode_in->crtc_vtotal;
5184 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5185 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5186 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5187 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5188 	}
5189 
5190 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5191 
5192 	stream->output_color_space = get_output_color_space(timing_out);
5193 
5194 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5195 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
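	/*
	 * If no supported depth fits the sink's TMDS budget at the current
	 * encoding, fall back to YCbCr 4:2:0, which halves the required
	 * clock, and redo the depth adjustment.
	 */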
5196 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5197 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5198 		    drm_mode_is_420_also(info, mode_in) &&
5199 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5200 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5201 			adjust_colour_depth_from_display_info(timing_out, info);
5202 		}
5203 	}
5204 }
5205 
5206 static void fill_audio_info(struct audio_info *audio_info,
5207 			    const struct drm_connector *drm_connector,
5208 			    const struct dc_sink *dc_sink)
5209 {
5210 	int i = 0;
5211 	int cea_revision = 0;
5212 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5213 
5214 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5215 	audio_info->product_id = edid_caps->product_id;
5216 
5217 	cea_revision = drm_connector->display_info.cea_rev;
5218 
5219 	strscpy(audio_info->display_name,
5220 		edid_caps->display_name,
5221 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5222 
5223 	if (cea_revision >= 3) {
5224 		audio_info->mode_count = edid_caps->audio_mode_count;
5225 
5226 		for (i = 0; i < audio_info->mode_count; ++i) {
5227 			audio_info->modes[i].format_code =
5228 					(enum audio_format_code)
5229 					(edid_caps->audio_modes[i].format_code);
5230 			audio_info->modes[i].channel_count =
5231 					edid_caps->audio_modes[i].channel_count;
5232 			audio_info->modes[i].sample_rates.all =
5233 					edid_caps->audio_modes[i].sample_rate;
5234 			audio_info->modes[i].sample_size =
5235 					edid_caps->audio_modes[i].sample_size;
5236 		}
5237 	}
5238 
5239 	audio_info->flags.all = edid_caps->speaker_flags;
5240 
5241 	/* TODO: We only check the progressive mode; check the interlaced mode too */
5242 	if (drm_connector->latency_present[0]) {
5243 		audio_info->video_latency = drm_connector->video_latency[0];
5244 		audio_info->audio_latency = drm_connector->audio_latency[0];
5245 	}
5246 
5247 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5248 
5249 }
5250 
5251 static void
5252 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5253 				      struct drm_display_mode *dst_mode)
5254 {
5255 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5256 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5257 	dst_mode->crtc_clock = src_mode->crtc_clock;
5258 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5259 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5260 	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5261 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5262 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5263 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5264 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5265 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5266 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5267 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5268 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5269 }
5270 
5271 static void
5272 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5273 					const struct drm_display_mode *native_mode,
5274 					bool scale_enabled)
5275 {
5276 	if (scale_enabled) {
5277 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5278 	} else if (native_mode->clock == drm_mode->clock &&
5279 			native_mode->htotal == drm_mode->htotal &&
5280 			native_mode->vtotal == drm_mode->vtotal) {
5281 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5282 	} else {
5283 		/* no scaling and no amdgpu-inserted mode: nothing to patch */
5284 	}
5285 }
5286 
5287 static struct dc_sink *
5288 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5289 {
5290 	struct dc_sink_init_data sink_init_data = { 0 };
5291 	struct dc_sink *sink = NULL;
5292 	sink_init_data.link = aconnector->dc_link;
5293 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5294 
5295 	sink = dc_sink_create(&sink_init_data);
5296 	if (!sink) {
5297 		DRM_ERROR("Failed to create sink!\n");
5298 		return NULL;
5299 	}
5300 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5301 
5302 	return sink;
5303 }
5304 
5305 static void set_multisync_trigger_params(
5306 		struct dc_stream_state *stream)
5307 {
5308 	struct dc_stream_state *master = NULL;
5309 
5310 	if (stream->triggered_crtc_reset.enabled) {
5311 		master = stream->triggered_crtc_reset.event_source;
5312 		stream->triggered_crtc_reset.event =
5313 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5314 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5315 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5316 	}
5317 }
5318 
5319 static void set_master_stream(struct dc_stream_state *stream_set[],
5320 			      int stream_count)
5321 {
5322 	int j, highest_rfr = 0, master_stream = 0;
5323 
5324 	for (j = 0; j < stream_count; j++) {
5325 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5326 			int refresh_rate = 0;
5327 
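			/*
			 * pix_clk_100hz * 100 is the pixel clock in Hz;
			 * dividing by the frame's total pixel count gives
			 * the refresh rate in Hz. E.g. CEA 1080p60:
			 * 1485000 * 100 / (2200 * 1125) = 60.
			 */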
5328 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5329 				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5330 			if (refresh_rate > highest_rfr) {
5331 				highest_rfr = refresh_rate;
5332 				master_stream = j;
5333 			}
5334 		}
5335 	}
5336 	for (j = 0; j < stream_count; j++) {
5337 		if (stream_set[j])
5338 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5339 	}
5340 }
5341 
5342 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5343 {
5344 	int i = 0;
5345 	struct dc_stream_state *stream;
5346 
5347 	if (context->stream_count < 2)
5348 		return;
5349 	for (i = 0; i < context->stream_count; i++) {
5350 		if (!context->streams[i])
5351 			continue;
5352 		/*
5353 		 * TODO: add a function to read the AMD VSDB bits and set the
5354 		 * crtc_sync_master.multi_sync_enabled flag.
5355 		 * For now it's left set to false.
5356 		 */
5357 	}
5358 
5359 	set_master_stream(context->streams, context->stream_count);
5360 
5361 	for (i = 0; i < context->stream_count; i++) {
5362 		stream = context->streams[i];
5363 
5364 		if (!stream)
5365 			continue;
5366 
5367 		set_multisync_trigger_params(stream);
5368 	}
5369 }
5370 
5371 static struct drm_display_mode *
5372 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5373 			  bool use_probed_modes)
5374 {
5375 	struct drm_display_mode *m, *m_pref = NULL;
5376 	u16 current_refresh, highest_refresh;
5377 	struct list_head *list_head = use_probed_modes ?
5378 						    &aconnector->base.probed_modes :
5379 						    &aconnector->base.modes;
5380 
5381 	if (aconnector->freesync_vid_base.clock != 0)
5382 		return &aconnector->freesync_vid_base;
5383 
5384 	/* Find the preferred mode */
5385 	list_for_each_entry(m, list_head, head) {
5386 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5387 			m_pref = m;
5388 			break;
5389 		}
5390 	}
5391 
5392 	if (!m_pref) {
5393 		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
5394 		m_pref = list_first_entry_or_null(
5395 			&aconnector->base.modes, struct drm_display_mode, head);
5396 		if (!m_pref) {
5397 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5398 			return NULL;
5399 		}
5400 	}
5401 
5402 	highest_refresh = drm_mode_vrefresh(m_pref);
5403 
5404 	/*
5405 	 * Find the mode with the highest refresh rate at the same resolution.
5406 	 * For some monitors, the preferred mode is not the one with the
5407 	 * highest supported refresh rate.
5408 	 */
5409 	list_for_each_entry(m, list_head, head) {
5410 		current_refresh  = drm_mode_vrefresh(m);
5411 		current_refresh = drm_mode_vrefresh(m);
5412 		if (m->hdisplay == m_pref->hdisplay &&
5413 		    m->vdisplay == m_pref->vdisplay &&
5414 		    highest_refresh < current_refresh) {
5415 			highest_refresh = current_refresh;
5416 			m_pref = m;
5417 		}
5418 	}
5419 
5420 	aconnector->freesync_vid_base = *m_pref;
5421 	return m_pref;
5422 }
5423 
5424 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5425 				   struct amdgpu_dm_connector *aconnector)
5426 {
5427 	struct drm_display_mode *high_mode;
5428 	int timing_diff;
5429 
5430 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5431 	if (!high_mode || !mode)
5432 		return false;
5433 
5434 	timing_diff = high_mode->vtotal - mode->vtotal;
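	/*
	 * A freesync video variant differs from the base mode only in the
	 * vertical front porch. E.g. a 148.5 MHz base mode with vtotal
	 * 1125 (60 Hz) may have a 50 Hz sibling with vtotal 1350 and
	 * identical clock and horizontal timing, its vsync_start/vsync_end
	 * offset by the same timing_diff.
	 */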
5435 
5436 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5437 	    high_mode->hdisplay != mode->hdisplay ||
5438 	    high_mode->vdisplay != mode->vdisplay ||
5439 	    high_mode->hsync_start != mode->hsync_start ||
5440 	    high_mode->hsync_end != mode->hsync_end ||
5441 	    high_mode->htotal != mode->htotal ||
5442 	    high_mode->hskew != mode->hskew ||
5443 	    high_mode->vscan != mode->vscan ||
5444 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5445 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5446 		return false;
5447 	else
5448 		return true;
5449 }
5450 
5451 static struct dc_stream_state *
5452 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5453 		       const struct drm_display_mode *drm_mode,
5454 		       const struct dm_connector_state *dm_state,
5455 		       const struct dc_stream_state *old_stream,
5456 		       int requested_bpc)
5457 {
5458 	struct drm_display_mode *preferred_mode = NULL;
5459 	struct drm_connector *drm_connector;
5460 	const struct drm_connector_state *con_state =
5461 		dm_state ? &dm_state->base : NULL;
5462 	struct dc_stream_state *stream = NULL;
5463 	struct drm_display_mode mode = *drm_mode;
5464 	struct drm_display_mode saved_mode;
5465 	struct drm_display_mode *freesync_mode = NULL;
5466 	bool native_mode_found = false;
5467 	bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5468 	int mode_refresh;
5469 	int preferred_refresh = 0;
5470 #if defined(CONFIG_DRM_AMD_DC_DCN)
5471 	struct dsc_dec_dpcd_caps dsc_caps;
5472 	uint32_t link_bandwidth_kbps;
5473 #endif
5474 	struct dc_sink *sink = NULL;
5475 
5476 	memset(&saved_mode, 0, sizeof(saved_mode));
5477 
5478 	if (aconnector == NULL) {
5479 		DRM_ERROR("aconnector is NULL!\n");
5480 		return stream;
5481 	}
5482 
5483 	drm_connector = &aconnector->base;
5484 
5485 	if (!aconnector->dc_sink) {
5486 		sink = create_fake_sink(aconnector);
5487 		if (!sink)
5488 			return stream;
5489 	} else {
5490 		sink = aconnector->dc_sink;
5491 		dc_sink_retain(sink);
5492 	}
5493 
5494 	stream = dc_create_stream_for_sink(sink);
5495 
5496 	if (stream == NULL) {
5497 		DRM_ERROR("Failed to create stream for sink!\n");
5498 		goto finish;
5499 	}
5500 
5501 	stream->dm_stream_context = aconnector;
5502 
5503 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5504 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5505 
5506 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5507 		/* Search for preferred mode */
5508 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5509 			native_mode_found = true;
5510 			break;
5511 		}
5512 	}
5513 	if (!native_mode_found)
5514 		preferred_mode = list_first_entry_or_null(
5515 				&aconnector->base.modes,
5516 				struct drm_display_mode,
5517 				head);
5518 
5519 	mode_refresh = drm_mode_vrefresh(&mode);
5520 
5521 	if (preferred_mode == NULL) {
5522 		/*
5523 		 * This may not be an error, the use case is when we have no
5524 		 * usermode calls to reset and set mode upon hotplug. In this
5525 		 * case, we call set mode ourselves to restore the previous mode
5526 		 * and the mode list may not have been filled in yet.
5527 		 */
5528 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5529 	} else {
5530 		recalculate_timing |= amdgpu_freesync_vid_mode &&
5531 				 is_freesync_video_mode(&mode, aconnector);
5532 		if (recalculate_timing) {
5533 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5534 			saved_mode = mode;
5535 			mode = *freesync_mode;
5536 		} else {
5537 			decide_crtc_timing_for_drm_display_mode(
5538 				&mode, preferred_mode,
5539 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5540 		}
5541 
5542 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5543 	}
5544 
5545 	if (recalculate_timing)
5546 		drm_mode_set_crtcinfo(&saved_mode, 0);
5547 	else if (!dm_state)
5548 		drm_mode_set_crtcinfo(&mode, 0);
5549 
5550 	/*
5551 	 * If scaling is enabled and the refresh rate didn't change,
5552 	 * we copy the VIC and polarities of the old timings.
5553 	 */
5554 	if (!recalculate_timing || mode_refresh != preferred_refresh)
5555 		fill_stream_properties_from_drm_display_mode(
5556 			stream, &mode, &aconnector->base, con_state, NULL,
5557 			requested_bpc);
5558 	else
5559 		fill_stream_properties_from_drm_display_mode(
5560 			stream, &mode, &aconnector->base, con_state, old_stream,
5561 			requested_bpc);
5562 
5563 	stream->timing.flags.DSC = 0;
5564 
5565 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5566 #if defined(CONFIG_DRM_AMD_DC_DCN)
5567 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5568 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5569 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5570 				      &dsc_caps);
5571 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5572 							     dc_link_get_link_cap(aconnector->dc_link));
5573 
5574 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5575 			/* Set DSC policy according to dsc_clock_en */
5576 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5577 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5578 
5579 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5580 						  &dsc_caps,
5581 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5582 						  0,
5583 						  link_bandwidth_kbps,
5584 						  &stream->timing,
5585 						  &stream->timing.dsc_cfg))
5586 				stream->timing.flags.DSC = 1;
5587 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5588 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5589 				stream->timing.flags.DSC = 1;
5590 
5591 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5592 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5593 
5594 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5595 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5596 
5597 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5598 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5599 		}
5600 #endif
5601 	}
5602 
5603 	update_stream_scaling_settings(&mode, dm_state, stream);
5604 
5605 	fill_audio_info(
5606 		&stream->audio_info,
5607 		drm_connector,
5608 		sink);
5609 
5610 	update_stream_signal(stream, sink);
5611 
5612 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5613 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5614 
5615 	if (stream->link->psr_settings.psr_feature_enabled) {
5616 		/*
5617 		 * Decide whether the stream supports the VSC SDP colorimetry
5618 		 * capability before building the VSC info packet.
5619 		 */
5620 		stream->use_vsc_sdp_for_colorimetry = false;
5621 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5622 			stream->use_vsc_sdp_for_colorimetry =
5623 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5624 		} else {
5625 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5626 				stream->use_vsc_sdp_for_colorimetry = true;
5627 		}
5628 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5629 	}
5630 finish:
5631 	dc_sink_release(sink);
5632 
5633 	return stream;
5634 }
5635 
5636 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5637 {
5638 	drm_crtc_cleanup(crtc);
5639 	kfree(crtc);
5640 }
5641 
5642 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5643 				  struct drm_crtc_state *state)
5644 {
5645 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5646 
5647 	/* TODO: Destroy dc_stream objects when the stream object is flattened */
5648 	if (cur->stream)
5649 		dc_stream_release(cur->stream);
5650 
5652 	__drm_atomic_helper_crtc_destroy_state(state);
5653 
5655 	kfree(state);
5656 }
5657 
5658 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5659 {
5660 	struct dm_crtc_state *state;
5661 
5662 	if (crtc->state)
5663 		dm_crtc_destroy_state(crtc, crtc->state);
5664 
5665 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5666 	if (WARN_ON(!state))
5667 		return;
5668 
5669 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5670 }
5671 
5672 static struct drm_crtc_state *
5673 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5674 {
5675 	struct dm_crtc_state *state, *cur;
5676 
5677 	cur = to_dm_crtc_state(crtc->state);
5678 
5679 	if (WARN_ON(!crtc->state))
5680 		return NULL;
5681 
5682 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5683 	if (!state)
5684 		return NULL;
5685 
5686 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5687 
5688 	if (cur->stream) {
5689 		state->stream = cur->stream;
5690 		dc_stream_retain(state->stream);
5691 	}
5692 
5693 	state->active_planes = cur->active_planes;
5694 	state->vrr_infopacket = cur->vrr_infopacket;
5695 	state->abm_level = cur->abm_level;
5696 	state->vrr_supported = cur->vrr_supported;
5697 	state->freesync_config = cur->freesync_config;
5698 	state->cm_has_degamma = cur->cm_has_degamma;
5699 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5700 	/* TODO: Duplicate dc_stream once the stream object is flattened */
5701 
5702 	return &state->base;
5703 }
5704 
5705 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5706 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5707 {
5708 	crtc_debugfs_init(crtc);
5709 
5710 	return 0;
5711 }
5712 #endif
5713 
5714 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5715 {
5716 	enum dc_irq_source irq_source;
5717 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5718 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5719 	int rc;
5720 
5721 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5722 
5723 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5724 
5725 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5726 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
5727 	return rc;
5728 }
5729 
5730 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5731 {
5732 	enum dc_irq_source irq_source;
5733 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5734 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5735 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5736 #if defined(CONFIG_DRM_AMD_DC_DCN)
5737 	struct amdgpu_display_manager *dm = &adev->dm;
5738 	unsigned long flags;
5739 #endif
5740 	int rc = 0;
5741 
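	/*
	 * In VRR mode the vertical front porch is stretched, so VUPDATE
	 * (fired at the end of the front porch) rather than VBLANK is the
	 * reliable per-frame signal; hence the vupdate IRQ is only kept
	 * in step with the vblank IRQ while VRR is active.
	 */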
5742 	if (enable) {
5743 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5744 		if (amdgpu_dm_vrr_active(acrtc_state))
5745 			rc = dm_set_vupdate_irq(crtc, true);
5746 	} else {
5747 		/* vblank irq off -> vupdate irq off */
5748 		rc = dm_set_vupdate_irq(crtc, false);
5749 	}
5750 
5751 	if (rc)
5752 		return rc;
5753 
5754 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5755 
5756 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5757 		return -EBUSY;
5758 
5759 	if (amdgpu_in_reset(adev))
5760 		return 0;
5761 
5762 #if defined(CONFIG_DRM_AMD_DC_DCN)
5763 	spin_lock_irqsave(&dm->vblank_lock, flags);
5764 	dm->vblank_workqueue->dm = dm;
5765 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5766 	dm->vblank_workqueue->enable = enable;
5767 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
5768 	schedule_work(&dm->vblank_workqueue->mall_work);
5769 #endif
5770 
5771 	return 0;
5772 }
5773 
5774 static int dm_enable_vblank(struct drm_crtc *crtc)
5775 {
5776 	return dm_set_vblank(crtc, true);
5777 }
5778 
5779 static void dm_disable_vblank(struct drm_crtc *crtc)
5780 {
5781 	dm_set_vblank(crtc, false);
5782 }
5783 
5784 /* Only the options currently available for the driver are implemented */
5785 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5786 	.reset = dm_crtc_reset_state,
5787 	.destroy = amdgpu_dm_crtc_destroy,
5788 	.set_config = drm_atomic_helper_set_config,
5789 	.page_flip = drm_atomic_helper_page_flip,
5790 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5791 	.atomic_destroy_state = dm_crtc_destroy_state,
5792 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5793 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5794 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5795 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5796 	.enable_vblank = dm_enable_vblank,
5797 	.disable_vblank = dm_disable_vblank,
5798 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5799 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5800 	.late_register = amdgpu_dm_crtc_late_register,
5801 #endif
5802 };
5803 
5804 static enum drm_connector_status
5805 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5806 {
5807 	bool connected;
5808 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5809 
5810 	/*
5811 	 * Notes:
5812 	 * 1. This interface is NOT called in context of HPD irq.
5813 	 * 2. This interface *is called* in the context of a user-mode ioctl,
5814 	 * which makes it a bad place for *any* MST-related activity.
5815 	 */
5816 
5817 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5818 	    !aconnector->fake_enable)
5819 		connected = (aconnector->dc_sink != NULL);
5820 	else
5821 		connected = (aconnector->base.force == DRM_FORCE_ON);
5822 
5823 	update_subconnector_property(aconnector);
5824 
5825 	return (connected ? connector_status_connected :
5826 			connector_status_disconnected);
5827 }
5828 
5829 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5830 					    struct drm_connector_state *connector_state,
5831 					    struct drm_property *property,
5832 					    uint64_t val)
5833 {
5834 	struct drm_device *dev = connector->dev;
5835 	struct amdgpu_device *adev = drm_to_adev(dev);
5836 	struct dm_connector_state *dm_old_state =
5837 		to_dm_connector_state(connector->state);
5838 	struct dm_connector_state *dm_new_state =
5839 		to_dm_connector_state(connector_state);
5840 
5841 	int ret = -EINVAL;
5842 
5843 	if (property == dev->mode_config.scaling_mode_property) {
5844 		enum amdgpu_rmx_type rmx_type;
5845 
5846 		switch (val) {
5847 		case DRM_MODE_SCALE_CENTER:
5848 			rmx_type = RMX_CENTER;
5849 			break;
5850 		case DRM_MODE_SCALE_ASPECT:
5851 			rmx_type = RMX_ASPECT;
5852 			break;
5853 		case DRM_MODE_SCALE_FULLSCREEN:
5854 			rmx_type = RMX_FULL;
5855 			break;
5856 		case DRM_MODE_SCALE_NONE:
5857 		default:
5858 			rmx_type = RMX_OFF;
5859 			break;
5860 		}
5861 
5862 		if (dm_old_state->scaling == rmx_type)
5863 			return 0;
5864 
5865 		dm_new_state->scaling = rmx_type;
5866 		ret = 0;
5867 	} else if (property == adev->mode_info.underscan_hborder_property) {
5868 		dm_new_state->underscan_hborder = val;
5869 		ret = 0;
5870 	} else if (property == adev->mode_info.underscan_vborder_property) {
5871 		dm_new_state->underscan_vborder = val;
5872 		ret = 0;
5873 	} else if (property == adev->mode_info.underscan_property) {
5874 		dm_new_state->underscan_enable = val;
5875 		ret = 0;
5876 	} else if (property == adev->mode_info.abm_level_property) {
5877 		dm_new_state->abm_level = val;
5878 		ret = 0;
5879 	}
5880 
5881 	return ret;
5882 }
5883 
5884 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5885 					    const struct drm_connector_state *state,
5886 					    struct drm_property *property,
5887 					    uint64_t *val)
5888 {
5889 	struct drm_device *dev = connector->dev;
5890 	struct amdgpu_device *adev = drm_to_adev(dev);
5891 	struct dm_connector_state *dm_state =
5892 		to_dm_connector_state(state);
5893 	int ret = -EINVAL;
5894 
5895 	if (property == dev->mode_config.scaling_mode_property) {
5896 		switch (dm_state->scaling) {
5897 		case RMX_CENTER:
5898 			*val = DRM_MODE_SCALE_CENTER;
5899 			break;
5900 		case RMX_ASPECT:
5901 			*val = DRM_MODE_SCALE_ASPECT;
5902 			break;
5903 		case RMX_FULL:
5904 			*val = DRM_MODE_SCALE_FULLSCREEN;
5905 			break;
5906 		case RMX_OFF:
5907 		default:
5908 			*val = DRM_MODE_SCALE_NONE;
5909 			break;
5910 		}
5911 		ret = 0;
5912 	} else if (property == adev->mode_info.underscan_hborder_property) {
5913 		*val = dm_state->underscan_hborder;
5914 		ret = 0;
5915 	} else if (property == adev->mode_info.underscan_vborder_property) {
5916 		*val = dm_state->underscan_vborder;
5917 		ret = 0;
5918 	} else if (property == adev->mode_info.underscan_property) {
5919 		*val = dm_state->underscan_enable;
5920 		ret = 0;
5921 	} else if (property == adev->mode_info.abm_level_property) {
5922 		*val = dm_state->abm_level;
5923 		ret = 0;
5924 	}
5925 
5926 	return ret;
5927 }
5928 
5929 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5930 {
5931 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5932 
5933 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5934 }
5935 
5936 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5937 {
5938 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5939 	const struct dc_link *link = aconnector->dc_link;
5940 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5941 	struct amdgpu_display_manager *dm = &adev->dm;
5942 
5943 	/*
5944 	 * Call this only if mst_mgr was initialized before, since it's not
5945 	 * done for all connector types.
5946 	 */
5947 	if (aconnector->mst_mgr.dev)
5948 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5949 
5950 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5951 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5952 
5953 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5954 	    link->type != dc_connection_none &&
5955 	    dm->backlight_dev) {
5956 		backlight_device_unregister(dm->backlight_dev);
5957 		dm->backlight_dev = NULL;
5958 	}
5959 #endif
5960 
5961 	if (aconnector->dc_em_sink)
5962 		dc_sink_release(aconnector->dc_em_sink);
5963 	aconnector->dc_em_sink = NULL;
5964 	if (aconnector->dc_sink)
5965 		dc_sink_release(aconnector->dc_sink);
5966 	aconnector->dc_sink = NULL;
5967 
5968 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5969 	drm_connector_unregister(connector);
5970 	drm_connector_cleanup(connector);
5971 	if (aconnector->i2c) {
5972 		i2c_del_adapter(&aconnector->i2c->base);
5973 		kfree(aconnector->i2c);
5974 	}
5975 	kfree(aconnector->dm_dp_aux.aux.name);
5976 
5977 	kfree(connector);
5978 }
5979 
5980 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5981 {
5982 	struct dm_connector_state *state =
5983 		to_dm_connector_state(connector->state);
5984 
5985 	if (connector->state)
5986 		__drm_atomic_helper_connector_destroy_state(connector->state);
5987 
5988 	kfree(state);
5989 
5990 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5991 
5992 	if (state) {
5993 		state->scaling = RMX_OFF;
5994 		state->underscan_enable = false;
5995 		state->underscan_hborder = 0;
5996 		state->underscan_vborder = 0;
5997 		state->base.max_requested_bpc = 8;
5998 		state->vcpi_slots = 0;
5999 		state->pbn = 0;
6000 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6001 			state->abm_level = amdgpu_dm_abm_level;
6002 
6003 		__drm_atomic_helper_connector_reset(connector, &state->base);
6004 	}
6005 }
6006 
6007 struct drm_connector_state *
6008 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6009 {
6010 	struct dm_connector_state *state =
6011 		to_dm_connector_state(connector->state);
6012 
6013 	struct dm_connector_state *new_state =
6014 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6015 
6016 	if (!new_state)
6017 		return NULL;
6018 
6019 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6020 
6021 	new_state->freesync_capable = state->freesync_capable;
6022 	new_state->abm_level = state->abm_level;
6023 	new_state->scaling = state->scaling;
6024 	new_state->underscan_enable = state->underscan_enable;
6025 	new_state->underscan_hborder = state->underscan_hborder;
6026 	new_state->underscan_vborder = state->underscan_vborder;
6027 	new_state->vcpi_slots = state->vcpi_slots;
6028 	new_state->pbn = state->pbn;
6029 	return &new_state->base;
6030 }
6031 
6032 static int
6033 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6034 {
6035 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6036 		to_amdgpu_dm_connector(connector);
6037 	int r;
6038 
6039 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6040 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6041 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6042 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6043 		if (r)
6044 			return r;
6045 	}
6046 
6047 #if defined(CONFIG_DEBUG_FS)
6048 	connector_debugfs_init(amdgpu_dm_connector);
6049 #endif
6050 
6051 	return 0;
6052 }
6053 
6054 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6055 	.reset = amdgpu_dm_connector_funcs_reset,
6056 	.detect = amdgpu_dm_connector_detect,
6057 	.fill_modes = drm_helper_probe_single_connector_modes,
6058 	.destroy = amdgpu_dm_connector_destroy,
6059 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6060 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6061 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6062 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6063 	.late_register = amdgpu_dm_connector_late_register,
6064 	.early_unregister = amdgpu_dm_connector_unregister
6065 };
6066 
6067 static int get_modes(struct drm_connector *connector)
6068 {
6069 	return amdgpu_dm_connector_get_modes(connector);
6070 }
6071 
6072 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6073 {
6074 	struct dc_sink_init_data init_params = {
6075 			.link = aconnector->dc_link,
6076 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6077 	};
6078 	struct edid *edid;
6079 
6080 	if (!aconnector->base.edid_blob_ptr) {
6081 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6082 				aconnector->base.name);
6083 
6084 		aconnector->base.force = DRM_FORCE_OFF;
6085 		aconnector->base.override_edid = false;
6086 		return;
6087 	}
6088 
6089 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6090 
6091 	aconnector->edid = edid;
6092 
6093 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6094 		aconnector->dc_link,
6095 		(uint8_t *)edid,
6096 		(edid->extensions + 1) * EDID_LENGTH,
6097 		&init_params);
6098 
6099 	if (aconnector->base.force == DRM_FORCE_ON) {
6100 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6101 		aconnector->dc_link->local_sink :
6102 		aconnector->dc_em_sink;
6103 		dc_sink_retain(aconnector->dc_sink);
6104 	}
6105 }
6106 
6107 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6108 {
6109 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6110 
6111 	/*
6112 	 * In case of a headless boot with force-on for a DP managed connector,
6113 	 * these settings have to be != 0 to get an initial modeset.
6114 	 */
6115 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6116 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6117 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6118 	}
6119 
6121 	aconnector->base.override_edid = true;
6122 	create_eml_sink(aconnector);
6123 }
6124 
6125 static struct dc_stream_state *
6126 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6127 				const struct drm_display_mode *drm_mode,
6128 				const struct dm_connector_state *dm_state,
6129 				const struct dc_stream_state *old_stream)
6130 {
6131 	struct drm_connector *connector = &aconnector->base;
6132 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6133 	struct dc_stream_state *stream;
6134 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6135 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6136 	enum dc_status dc_result = DC_OK;
6137 
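	/*
	 * Retry sketch: starting from the requested bpc (the connector's
	 * max_requested_bpc, or 8 without a state), each failed DC
	 * validation lowers the depth by 2, e.g. 10 -> 8 -> 6, before
	 * giving up.
	 */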
6138 	do {
6139 		stream = create_stream_for_sink(aconnector, drm_mode,
6140 						dm_state, old_stream,
6141 						requested_bpc);
6142 		if (stream == NULL) {
6143 			DRM_ERROR("Failed to create stream for sink!\n");
6144 			break;
6145 		}
6146 
6147 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6148 
6149 		if (dc_result != DC_OK) {
6150 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6151 				      drm_mode->hdisplay,
6152 				      drm_mode->vdisplay,
6153 				      drm_mode->clock,
6154 				      dc_result,
6155 				      dc_status_to_str(dc_result));
6156 
6157 			dc_stream_release(stream);
6158 			stream = NULL;
6159 			requested_bpc -= 2; /* lower bpc to retry validation */
6160 		}
6161 
6162 	} while (stream == NULL && requested_bpc >= 6);
6163 
6164 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6165 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6166 
6167 		aconnector->force_yuv420_output = true;
6168 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6169 						dm_state, old_stream);
6170 		aconnector->force_yuv420_output = false;
6171 	}
6172 
6173 	return stream;
6174 }
6175 
6176 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6177 				   struct drm_display_mode *mode)
6178 {
6179 	int result = MODE_ERROR;
6180 	struct dc_sink *dc_sink;
6181 	/* TODO: Unhardcode stream count */
6182 	struct dc_stream_state *stream;
6183 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6184 
6185 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6186 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6187 		return result;
6188 
6189 	/*
6190 	 * Only run this the first time mode_valid is called, to initialize
6191 	 * EDID management.
6192 	 */
6193 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6194 		!aconnector->dc_em_sink)
6195 		handle_edid_mgmt(aconnector);
6196 
6197 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6198 
6199 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6200 				aconnector->base.force != DRM_FORCE_ON) {
6201 		DRM_ERROR("dc_sink is NULL!\n");
6202 		goto fail;
6203 	}
6204 
6205 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6206 	if (stream) {
6207 		dc_stream_release(stream);
6208 		result = MODE_OK;
6209 	}
6210 
6211 fail:
6212 	/* TODO: error handling */
6213 	return result;
6214 }
6215 
6216 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6217 				struct dc_info_packet *out)
6218 {
6219 	struct hdmi_drm_infoframe frame;
6220 	unsigned char buf[30]; /* 26 + 4 */
6221 	ssize_t len;
6222 	int ret, i;
6223 
6224 	memset(out, 0, sizeof(*out));
6225 
6226 	if (!state->hdr_output_metadata)
6227 		return 0;
6228 
6229 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6230 	if (ret)
6231 		return ret;
6232 
6233 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6234 	if (len < 0)
6235 		return (int)len;
6236 
6237 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6238 	if (len != 30)
6239 		return -EINVAL;
6240 
6241 	/* Prepare the infopacket for DC. */
6242 	switch (state->connector->connector_type) {
6243 	case DRM_MODE_CONNECTOR_HDMIA:
6244 		out->hb0 = 0x87; /* type */
6245 		out->hb1 = 0x01; /* version */
6246 		out->hb2 = 0x1A; /* length */
6247 		out->sb[0] = buf[3]; /* checksum */
6248 		i = 1;
6249 		break;
6250 
6251 	case DRM_MODE_CONNECTOR_DisplayPort:
6252 	case DRM_MODE_CONNECTOR_eDP:
6253 		out->hb0 = 0x00; /* sdp id, zero */
6254 		out->hb1 = 0x87; /* type */
6255 		out->hb2 = 0x1D; /* payload len - 1 */
6256 		out->hb3 = (0x13 << 2); /* sdp version */
6257 		out->sb[0] = 0x01; /* version */
6258 		out->sb[1] = 0x1A; /* length */
6259 		i = 2;
6260 		break;
6261 
6262 	default:
6263 		return -EINVAL;
6264 	}
6265 
6266 	memcpy(&out->sb[i], &buf[4], 26);
6267 	out->valid = true;
6268 
6269 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6270 		       sizeof(out->sb), false);
6271 
6272 	return 0;
6273 }
6274 
6275 static bool
6276 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6277 			  const struct drm_connector_state *new_state)
6278 {
6279 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6280 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6281 
6282 	if (old_blob != new_blob) {
6283 		if (old_blob && new_blob &&
6284 		    old_blob->length == new_blob->length)
6285 			return memcmp(old_blob->data, new_blob->data,
6286 				      old_blob->length);
6287 
6288 		return true;
6289 	}
6290 
6291 	return false;
6292 }
6293 
6294 static int
6295 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6296 				 struct drm_atomic_state *state)
6297 {
6298 	struct drm_connector_state *new_con_state =
6299 		drm_atomic_get_new_connector_state(state, conn);
6300 	struct drm_connector_state *old_con_state =
6301 		drm_atomic_get_old_connector_state(state, conn);
6302 	struct drm_crtc *crtc = new_con_state->crtc;
6303 	struct drm_crtc_state *new_crtc_state;
6304 	int ret;
6305 
6306 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6307 
6308 	if (!crtc)
6309 		return 0;
6310 
6311 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6312 		struct dc_info_packet hdr_infopacket;
6313 
6314 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6315 		if (ret)
6316 			return ret;
6317 
6318 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6319 		if (IS_ERR(new_crtc_state))
6320 			return PTR_ERR(new_crtc_state);
6321 
6322 		/*
6323 		 * DC considers the stream backends changed if the
6324 		 * static metadata changes. Forcing the modeset also
6325 		 * gives a simple way for userspace to switch from
6326 		 * 8bpc to 10bpc when setting the metadata to enter
6327 		 * or exit HDR.
6328 		 *
6329 		 * Changing the static metadata after it's been
6330 		 * set is permissible, however. So only force a
6331 		 * modeset if we're entering or exiting HDR.
6332 		 */
6333 		new_crtc_state->mode_changed =
6334 			!old_con_state->hdr_output_metadata ||
6335 			!new_con_state->hdr_output_metadata;
6336 	}
6337 
6338 	return 0;
6339 }
6340 
6341 static const struct drm_connector_helper_funcs
6342 amdgpu_dm_connector_helper_funcs = {
6343 	/*
6344 	 * If a second, bigger display is hotplugged in fbcon mode, its higher
6345 	 * resolution modes will be filtered out by drm_mode_validate_size() and
6346 	 * will be missing once the user starts lightdm. So we need to renew the
6347 	 * mode list in the get_modes callback, not just return the mode count.
6348 	 */
6349 	.get_modes = get_modes,
6350 	.mode_valid = amdgpu_dm_connector_mode_valid,
6351 	.atomic_check = amdgpu_dm_connector_atomic_check,
6352 };
6353 
6354 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6355 {
6356 }
6357 
6358 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6359 {
6360 	struct drm_atomic_state *state = new_crtc_state->state;
6361 	struct drm_plane *plane;
6362 	int num_active = 0;
6363 
6364 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6365 		struct drm_plane_state *new_plane_state;
6366 
6367 		/* Cursor planes are "fake". */
6368 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6369 			continue;
6370 
6371 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6372 
6373 		if (!new_plane_state) {
6374 			/*
6375 			 * The plane is enabled on the CRTC and hasn't changed
6376 			 * state. This means that it previously passed
6377 			 * validation and is therefore enabled.
6378 			 */
6379 			num_active += 1;
6380 			continue;
6381 		}
6382 
6383 		/* We need a framebuffer to be considered enabled. */
6384 		num_active += (new_plane_state->fb != NULL);
6385 	}
6386 
6387 	return num_active;
6388 }
6389 
6390 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6391 					 struct drm_crtc_state *new_crtc_state)
6392 {
6393 	struct dm_crtc_state *dm_new_crtc_state =
6394 		to_dm_crtc_state(new_crtc_state);
6395 
6396 	dm_new_crtc_state->active_planes = 0;
6397 
6398 	if (!dm_new_crtc_state->stream)
6399 		return;
6400 
6401 	dm_new_crtc_state->active_planes =
6402 		count_crtc_active_planes(new_crtc_state);
6403 }
6404 
6405 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6406 				       struct drm_atomic_state *state)
6407 {
6408 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6409 									  crtc);
6410 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6411 	struct dc *dc = adev->dm.dc;
6412 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6413 	int ret = -EINVAL;
6414 
6415 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6416 
6417 	dm_update_crtc_active_planes(crtc, crtc_state);
6418 
6419 	if (unlikely(!dm_crtc_state->stream &&
6420 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6421 		WARN_ON(1);
6422 		return ret;
6423 	}
6424 
6425 	/*
6426 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6427 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6428 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6429 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6430 	 */
6431 	if (crtc_state->enable &&
6432 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6433 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6434 		return -EINVAL;
6435 	}
6436 
6437 	/* In some use cases, like reset, no stream is attached */
6438 	if (!dm_crtc_state->stream)
6439 		return 0;
6440 
6441 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6442 		return 0;
6443 
6444 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6445 	return ret;
6446 }
6447 
6448 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6449 				      const struct drm_display_mode *mode,
6450 				      struct drm_display_mode *adjusted_mode)
6451 {
6452 	return true;
6453 }
6454 
6455 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6456 	.disable = dm_crtc_helper_disable,
6457 	.atomic_check = dm_crtc_helper_atomic_check,
6458 	.mode_fixup = dm_crtc_helper_mode_fixup,
6459 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6460 };
6461 
6462 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6463 {
6465 }
6466 
6467 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6468 {
6469 	switch (display_color_depth) {
6470 	case COLOR_DEPTH_666:
6471 		return 6;
6472 	case COLOR_DEPTH_888:
6473 		return 8;
6474 	case COLOR_DEPTH_101010:
6475 		return 10;
6476 	case COLOR_DEPTH_121212:
6477 		return 12;
6478 	case COLOR_DEPTH_141414:
6479 		return 14;
6480 	case COLOR_DEPTH_161616:
6481 		return 16;
6482 	default:
6483 		break;
6484 	}
6485 	return 0;
6486 }
6487 
6488 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6489 					  struct drm_crtc_state *crtc_state,
6490 					  struct drm_connector_state *conn_state)
6491 {
6492 	struct drm_atomic_state *state = crtc_state->state;
6493 	struct drm_connector *connector = conn_state->connector;
6494 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6495 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6496 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6497 	struct drm_dp_mst_topology_mgr *mst_mgr;
6498 	struct drm_dp_mst_port *mst_port;
6499 	enum dc_color_depth color_depth;
6500 	int clock, bpp = 0;
6501 	bool is_y420 = false;
6502 
6503 	if (!aconnector->port || !aconnector->dc_sink)
6504 		return 0;
6505 
6506 	mst_port = aconnector->port;
6507 	mst_mgr = &aconnector->mst_port->mst_mgr;
6508 
6509 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6510 		return 0;
6511 
6512 	if (!state->duplicated) {
6513 		int max_bpc = conn_state->max_requested_bpc;
6514 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6515 				aconnector->force_yuv420_output;
6516 		color_depth = convert_color_depth_from_display_info(connector,
6517 								    is_y420,
6518 								    max_bpc);
6519 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6520 		clock = adjusted_mode->clock;
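		/*
		 * PBN (Payload Bandwidth Number) is the MST bandwidth unit
		 * of 54/64 MBps, with a ~0.6% margin folded in by
		 * drm_dp_calc_pbn_mode(); e.g. 1080p60 at 24 bpp
		 * (148500 kHz) works out to roughly 530 PBN.
		 */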
6521 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6522 	}
6523 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6524 									   mst_mgr,
6525 									   mst_port,
6526 									   dm_new_connector_state->pbn,
6527 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6528 	if (dm_new_connector_state->vcpi_slots < 0) {
6529 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6530 		return dm_new_connector_state->vcpi_slots;
6531 	}
6532 	return 0;
6533 }
6534 
6535 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6536 	.disable = dm_encoder_helper_disable,
6537 	.atomic_check = dm_encoder_helper_atomic_check
6538 };
6539 
6540 #if defined(CONFIG_DRM_AMD_DC_DCN)
6541 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6542 					    struct dc_state *dc_state)
6543 {
6544 	struct dc_stream_state *stream = NULL;
6545 	struct drm_connector *connector;
6546 	struct drm_connector_state *new_con_state, *old_con_state;
6547 	struct amdgpu_dm_connector *aconnector;
6548 	struct dm_connector_state *dm_conn_state;
6549 	int i, j, clock, bpp;
6550 	int vcpi, pbn_div, pbn = 0;
6551 
6552 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6553 
6554 		aconnector = to_amdgpu_dm_connector(connector);
6555 
6556 		if (!aconnector->port)
6557 			continue;
6558 
6559 		if (!new_con_state || !new_con_state->crtc)
6560 			continue;
6561 
6562 		dm_conn_state = to_dm_connector_state(new_con_state);
6563 
6564 		for (j = 0; j < dc_state->stream_count; j++) {
6565 			stream = dc_state->streams[j];
6566 			if (!stream)
6567 				continue;
6568 
6569 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6570 				break;
6571 
6572 			stream = NULL;
6573 		}
6574 
6575 		if (!stream)
6576 			continue;
6577 
6578 		if (stream->timing.flags.DSC != 1) {
6579 			drm_dp_mst_atomic_enable_dsc(state,
6580 						     aconnector->port,
6581 						     dm_conn_state->pbn,
6582 						     0,
6583 						     false);
6584 			continue;
6585 		}
6586 
6587 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6588 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6589 		clock = stream->timing.pix_clk_100hz / 10;
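		/*
		 * dsc_cfg.bits_per_pixel is stored in units of 1/16 bpp;
		 * dsc=true tells drm_dp_calc_pbn_mode() to divide that
		 * fractional value back out when computing bandwidth.
		 */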
6590 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6591 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6592 						    aconnector->port,
6593 						    pbn, pbn_div,
6594 						    true);
6595 		if (vcpi < 0)
6596 			return vcpi;
6597 
6598 		dm_conn_state->pbn = pbn;
6599 		dm_conn_state->vcpi_slots = vcpi;
6600 	}
6601 	return 0;
6602 }
6603 #endif
6604 
6605 static void dm_drm_plane_reset(struct drm_plane *plane)
6606 {
6607 	struct dm_plane_state *amdgpu_state = NULL;
6608 
6609 	if (plane->state)
6610 		plane->funcs->atomic_destroy_state(plane, plane->state);
6611 
6612 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6613 	WARN_ON(amdgpu_state == NULL);
6614 
6615 	if (amdgpu_state)
6616 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6617 }
6618 
6619 static struct drm_plane_state *
6620 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6621 {
6622 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6623 
6624 	old_dm_plane_state = to_dm_plane_state(plane->state);
6625 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6626 	if (!dm_plane_state)
6627 		return NULL;
6628 
6629 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6630 
6631 	if (old_dm_plane_state->dc_state) {
6632 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6633 		dc_plane_state_retain(dm_plane_state->dc_state);
6634 	}
6635 
6636 	return &dm_plane_state->base;
6637 }
6638 
6639 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6640 				struct drm_plane_state *state)
6641 {
6642 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6643 
6644 	if (dm_plane_state->dc_state)
6645 		dc_plane_state_release(dm_plane_state->dc_state);
6646 
6647 	drm_atomic_helper_plane_destroy_state(plane, state);
6648 }
6649 
6650 static const struct drm_plane_funcs dm_plane_funcs = {
6651 	.update_plane	= drm_atomic_helper_update_plane,
6652 	.disable_plane	= drm_atomic_helper_disable_plane,
6653 	.destroy	= drm_primary_helper_destroy,
6654 	.reset = dm_drm_plane_reset,
6655 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6656 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6657 	.format_mod_supported = dm_plane_format_mod_supported,
6658 };
6659 
6660 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6661 				      struct drm_plane_state *new_state)
6662 {
6663 	struct amdgpu_framebuffer *afb;
6664 	struct drm_gem_object *obj;
6665 	struct amdgpu_device *adev;
6666 	struct amdgpu_bo *rbo;
6667 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6668 	struct list_head list;
6669 	struct ttm_validate_buffer tv;
6670 	struct ww_acquire_ctx ticket;
6671 	uint32_t domain;
6672 	int r;
6673 
6674 	if (!new_state->fb) {
6675 		DRM_DEBUG_KMS("No FB bound\n");
6676 		return 0;
6677 	}
6678 
6679 	afb = to_amdgpu_framebuffer(new_state->fb);
6680 	obj = new_state->fb->obj[0];
6681 	rbo = gem_to_amdgpu_bo(obj);
6682 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6683 	INIT_LIST_HEAD(&list);
6684 
6685 	tv.bo = &rbo->tbo;
6686 	tv.num_shared = 1;
6687 	list_add(&tv.head, &list);
6688 
6689 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6690 	if (r) {
6691 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6692 		return r;
6693 	}
6694 
6695 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6696 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6697 	else
6698 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6699 
6700 	r = amdgpu_bo_pin(rbo, domain);
6701 	if (unlikely(r != 0)) {
6702 		if (r != -ERESTARTSYS)
6703 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6704 		ttm_eu_backoff_reservation(&ticket, &list);
6705 		return r;
6706 	}
6707 
6708 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6709 	if (unlikely(r != 0)) {
6710 		amdgpu_bo_unpin(rbo);
6711 		ttm_eu_backoff_reservation(&ticket, &list);
6712 		DRM_ERROR("%p bind failed\n", rbo);
6713 		return r;
6714 	}
6715 
6716 	ttm_eu_backoff_reservation(&ticket, &list);
6717 
6718 	afb->address = amdgpu_bo_gpu_offset(rbo);
6719 
6720 	amdgpu_bo_ref(rbo);
6721 
6722 	/*
6723 	 * We don't do surface updates on planes that have been newly created,
6724 	 * but we also don't have the afb->address during atomic check.
6725 	 *
6726 	 * Fill in buffer attributes depending on the address here, but only on
6727 	 * newly created planes since they're not being used by DC yet and this
6728 	 * won't modify global state.
6729 	 */
6730 	dm_plane_state_old = to_dm_plane_state(plane->state);
6731 	dm_plane_state_new = to_dm_plane_state(new_state);
6732 
6733 	if (dm_plane_state_new->dc_state &&
6734 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6735 		struct dc_plane_state *plane_state =
6736 			dm_plane_state_new->dc_state;
6737 		bool force_disable_dcc = !plane_state->dcc.enable;
6738 
6739 		fill_plane_buffer_attributes(
6740 			adev, afb, plane_state->format, plane_state->rotation,
6741 			afb->tiling_flags,
6742 			&plane_state->tiling_info, &plane_state->plane_size,
6743 			&plane_state->dcc, &plane_state->address,
6744 			afb->tmz_surface, force_disable_dcc);
6745 	}
6746 
6747 	return 0;
6748 }
6749 
6750 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6751 				       struct drm_plane_state *old_state)
6752 {
6753 	struct amdgpu_bo *rbo;
6754 	int r;
6755 
6756 	if (!old_state->fb)
6757 		return;
6758 
6759 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6760 	r = amdgpu_bo_reserve(rbo, false);
6761 	if (unlikely(r)) {
6762 		DRM_ERROR("failed to reserve rbo before unpin\n");
6763 		return;
6764 	}
6765 
6766 	amdgpu_bo_unpin(rbo);
6767 	amdgpu_bo_unreserve(rbo);
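	/* Drop the reference taken in dm_plane_helper_prepare_fb(). */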
6768 	amdgpu_bo_unref(&rbo);
6769 }
6770 
6771 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6772 				       struct drm_crtc_state *new_crtc_state)
6773 {
6774 	struct drm_framebuffer *fb = state->fb;
6775 	int min_downscale, max_upscale;
6776 	int min_scale = 0;
6777 	int max_scale = INT_MAX;
6778 
6779 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6780 	if (fb && state->crtc) {
6781 		/* Validate viewport to cover the case when only the position changes */
6782 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6783 			int viewport_width = state->crtc_w;
6784 			int viewport_height = state->crtc_h;
6785 
6786 			if (state->crtc_x < 0)
6787 				viewport_width += state->crtc_x;
6788 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6789 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6790 
6791 			if (state->crtc_y < 0)
6792 				viewport_height += state->crtc_y;
6793 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6794 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6795 
6796 			if (viewport_width < 0 || viewport_height < 0) {
6797 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6798 				return -EINVAL;
			} else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* x2 for width is because of pipe-split. */
				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
6801 				return -EINVAL;
6802 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
6803 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6804 				return -EINVAL;
6805 			}
6806 
6807 		}
6808 
6809 		/* Get min/max allowed scaling factors from plane caps. */
6810 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6811 					     &min_downscale, &max_upscale);
6812 		/*
6813 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6814 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6815 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6816 		 */
6817 		min_scale = (1000 << 16) / max_upscale;
6818 		max_scale = (1000 << 16) / min_downscale;
6819 	}
6820 
6821 	return drm_atomic_helper_check_plane_state(
6822 		state, new_crtc_state, min_scale, max_scale, true, true);
6823 }
6824 
6825 static int dm_plane_atomic_check(struct drm_plane *plane,
6826 				 struct drm_atomic_state *state)
6827 {
6828 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6829 										 plane);
6830 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6831 	struct dc *dc = adev->dm.dc;
6832 	struct dm_plane_state *dm_plane_state;
6833 	struct dc_scaling_info scaling_info;
6834 	struct drm_crtc_state *new_crtc_state;
6835 	int ret;
6836 
6837 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6838 
6839 	dm_plane_state = to_dm_plane_state(new_plane_state);
6840 
6841 	if (!dm_plane_state->dc_state)
6842 		return 0;
6843 
6844 	new_crtc_state =
6845 		drm_atomic_get_new_crtc_state(state,
6846 					      new_plane_state->crtc);
6847 	if (!new_crtc_state)
6848 		return -EINVAL;
6849 
6850 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6851 	if (ret)
6852 		return ret;
6853 
6854 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6855 	if (ret)
6856 		return ret;
6857 
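	/* Finally, let DC validate the resulting plane state against hardware limits. */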
6858 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6859 		return 0;
6860 
6861 	return -EINVAL;
6862 }
6863 
6864 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6865 				       struct drm_atomic_state *state)
6866 {
6867 	/* Only support async updates on cursor planes. */
6868 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6869 		return -EINVAL;
6870 
6871 	return 0;
6872 }
6873 
6874 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6875 					 struct drm_atomic_state *state)
6876 {
6877 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6878 									   plane);
6879 	struct drm_plane_state *old_state =
6880 		drm_atomic_get_old_plane_state(state, plane);
6881 
6882 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6883 
6884 	swap(plane->state->fb, new_state->fb);
6885 
6886 	plane->state->src_x = new_state->src_x;
6887 	plane->state->src_y = new_state->src_y;
6888 	plane->state->src_w = new_state->src_w;
6889 	plane->state->src_h = new_state->src_h;
6890 	plane->state->crtc_x = new_state->crtc_x;
6891 	plane->state->crtc_y = new_state->crtc_y;
6892 	plane->state->crtc_w = new_state->crtc_w;
6893 	plane->state->crtc_h = new_state->crtc_h;
6894 
6895 	handle_cursor_update(plane, old_state);
6896 }
6897 
6898 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6899 	.prepare_fb = dm_plane_helper_prepare_fb,
6900 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6901 	.atomic_check = dm_plane_atomic_check,
6902 	.atomic_async_check = dm_plane_atomic_async_check,
6903 	.atomic_async_update = dm_plane_atomic_async_update
6904 };
6905 
6906 /*
6907  * TODO: these are currently initialized to rgb formats only.
6908  * For future use cases we should either initialize them dynamically based on
6909  * plane capabilities, or initialize this array to all formats, so internal drm
6910  * check will succeed, and let DC implement proper check
6911  */
6912 static const uint32_t rgb_formats[] = {
6913 	DRM_FORMAT_XRGB8888,
6914 	DRM_FORMAT_ARGB8888,
6915 	DRM_FORMAT_RGBA8888,
6916 	DRM_FORMAT_XRGB2101010,
6917 	DRM_FORMAT_XBGR2101010,
6918 	DRM_FORMAT_ARGB2101010,
6919 	DRM_FORMAT_ABGR2101010,
6920 	DRM_FORMAT_XBGR8888,
6921 	DRM_FORMAT_ABGR8888,
6922 	DRM_FORMAT_RGB565,
6923 };
6924 
6925 static const uint32_t overlay_formats[] = {
6926 	DRM_FORMAT_XRGB8888,
6927 	DRM_FORMAT_ARGB8888,
6928 	DRM_FORMAT_RGBA8888,
6929 	DRM_FORMAT_XBGR8888,
6930 	DRM_FORMAT_ABGR8888,
6931 	DRM_FORMAT_RGB565
6932 };
6933 
6934 static const u32 cursor_formats[] = {
6935 	DRM_FORMAT_ARGB8888
6936 };
6937 
6938 static int get_plane_formats(const struct drm_plane *plane,
6939 			     const struct dc_plane_cap *plane_cap,
6940 			     uint32_t *formats, int max_formats)
6941 {
6942 	int i, num_formats = 0;
6943 
6944 	/*
6945 	 * TODO: Query support for each group of formats directly from
6946 	 * DC plane caps. This will require adding more formats to the
6947 	 * caps list.
6948 	 */
6949 
6950 	switch (plane->type) {
6951 	case DRM_PLANE_TYPE_PRIMARY:
6952 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6953 			if (num_formats >= max_formats)
6954 				break;
6955 
6956 			formats[num_formats++] = rgb_formats[i];
6957 		}
6958 
6959 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6960 			formats[num_formats++] = DRM_FORMAT_NV12;
6961 		if (plane_cap && plane_cap->pixel_format_support.p010)
6962 			formats[num_formats++] = DRM_FORMAT_P010;
6963 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6964 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6965 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6966 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6967 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6968 		}
6969 		break;
6970 
6971 	case DRM_PLANE_TYPE_OVERLAY:
6972 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6973 			if (num_formats >= max_formats)
6974 				break;
6975 
6976 			formats[num_formats++] = overlay_formats[i];
6977 		}
6978 		break;
6979 
6980 	case DRM_PLANE_TYPE_CURSOR:
6981 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6982 			if (num_formats >= max_formats)
6983 				break;
6984 
6985 			formats[num_formats++] = cursor_formats[i];
6986 		}
6987 		break;
6988 	}
6989 
6990 	return num_formats;
6991 }
6992 
6993 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6994 				struct drm_plane *plane,
6995 				unsigned long possible_crtcs,
6996 				const struct dc_plane_cap *plane_cap)
6997 {
6998 	uint32_t formats[32];
6999 	int num_formats;
	int res;
7001 	unsigned int supported_rotations;
7002 	uint64_t *modifiers = NULL;
7003 
7004 	num_formats = get_plane_formats(plane, plane_cap, formats,
7005 					ARRAY_SIZE(formats));
7006 
7007 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7008 	if (res)
7009 		return res;
7010 
7011 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7012 				       &dm_plane_funcs, formats, num_formats,
7013 				       modifiers, plane->type, NULL);
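	/* drm_universal_plane_init() copies the modifier list, so ours can be freed now. */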
7014 	kfree(modifiers);
7015 	if (res)
7016 		return res;
7017 
7018 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7019 	    plane_cap && plane_cap->per_pixel_alpha) {
7020 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7021 					  BIT(DRM_MODE_BLEND_PREMULTI);
7022 
7023 		drm_plane_create_alpha_property(plane);
7024 		drm_plane_create_blend_mode_property(plane, blend_caps);
7025 	}
7026 
7027 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7028 	    plane_cap &&
7029 	    (plane_cap->pixel_format_support.nv12 ||
7030 	     plane_cap->pixel_format_support.p010)) {
7031 		/* This only affects YUV formats. */
7032 		drm_plane_create_color_properties(
7033 			plane,
7034 			BIT(DRM_COLOR_YCBCR_BT601) |
7035 			BIT(DRM_COLOR_YCBCR_BT709) |
7036 			BIT(DRM_COLOR_YCBCR_BT2020),
7037 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7038 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7039 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7040 	}
7041 
7042 	supported_rotations =
7043 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7044 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7045 
7046 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7047 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7048 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7049 						   supported_rotations);
7050 
7051 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7052 
7053 	/* Create (reset) the plane state */
7054 	if (plane->funcs->reset)
7055 		plane->funcs->reset(plane);
7056 
7057 	return 0;
7058 }
7059 
7060 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7061 			       struct drm_plane *plane,
7062 			       uint32_t crtc_index)
7063 {
7064 	struct amdgpu_crtc *acrtc = NULL;
7065 	struct drm_plane *cursor_plane;
7066 
7067 	int res = -ENOMEM;
7068 
7069 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7070 	if (!cursor_plane)
7071 		goto fail;
7072 
7073 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;

7076 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7077 	if (!acrtc)
7078 		goto fail;
7079 
7080 	res = drm_crtc_init_with_planes(
7081 			dm->ddev,
7082 			&acrtc->base,
7083 			plane,
7084 			cursor_plane,
7085 			&amdgpu_dm_crtc_funcs, NULL);
7086 
7087 	if (res)
7088 		goto fail;
7089 
7090 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7091 
	/* Create (reset) the CRTC state */
7093 	if (acrtc->base.funcs->reset)
7094 		acrtc->base.funcs->reset(&acrtc->base);
7095 
7096 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7097 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7098 
7099 	acrtc->crtc_id = crtc_index;
7100 	acrtc->base.enabled = false;
7101 	acrtc->otg_inst = -1;
7102 
7103 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7104 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7105 				   true, MAX_COLOR_LUT_ENTRIES);
7106 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7107 
7108 	return 0;
7109 
7110 fail:
7111 	kfree(acrtc);
7112 	kfree(cursor_plane);
7113 	return res;
7114 }
7115 
7116 
7117 static int to_drm_connector_type(enum signal_type st)
7118 {
7119 	switch (st) {
7120 	case SIGNAL_TYPE_HDMI_TYPE_A:
7121 		return DRM_MODE_CONNECTOR_HDMIA;
7122 	case SIGNAL_TYPE_EDP:
7123 		return DRM_MODE_CONNECTOR_eDP;
7124 	case SIGNAL_TYPE_LVDS:
7125 		return DRM_MODE_CONNECTOR_LVDS;
7126 	case SIGNAL_TYPE_RGB:
7127 		return DRM_MODE_CONNECTOR_VGA;
7128 	case SIGNAL_TYPE_DISPLAY_PORT:
7129 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7130 		return DRM_MODE_CONNECTOR_DisplayPort;
7131 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7132 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7133 		return DRM_MODE_CONNECTOR_DVID;
7134 	case SIGNAL_TYPE_VIRTUAL:
7135 		return DRM_MODE_CONNECTOR_VIRTUAL;
7136 
7137 	default:
7138 		return DRM_MODE_CONNECTOR_Unknown;
7139 	}
7140 }
7141 
7142 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7143 {
7144 	struct drm_encoder *encoder;
7145 
7146 	/* There is only one encoder per connector */
7147 	drm_connector_for_each_possible_encoder(connector, encoder)
7148 		return encoder;
7149 
7150 	return NULL;
7151 }
7152 
7153 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7154 {
7155 	struct drm_encoder *encoder;
7156 	struct amdgpu_encoder *amdgpu_encoder;
7157 
7158 	encoder = amdgpu_dm_connector_to_encoder(connector);
7159 
7160 	if (encoder == NULL)
7161 		return;
7162 
7163 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7164 
7165 	amdgpu_encoder->native_mode.clock = 0;
7166 
7167 	if (!list_empty(&connector->probed_modes)) {
7168 		struct drm_display_mode *preferred_mode = NULL;
7169 
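		/*
		 * probed_modes is sorted with preferred modes first, so
		 * checking only the head entry is sufficient.
		 */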
7170 		list_for_each_entry(preferred_mode,
7171 				    &connector->probed_modes,
7172 				    head) {
7173 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7174 				amdgpu_encoder->native_mode = *preferred_mode;
7175 
7176 			break;
7177 		}
7179 	}
7180 }
7181 
7182 static struct drm_display_mode *
7183 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7184 			     char *name,
7185 			     int hdisplay, int vdisplay)
7186 {
7187 	struct drm_device *dev = encoder->dev;
7188 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7189 	struct drm_display_mode *mode = NULL;
7190 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7191 
7192 	mode = drm_mode_duplicate(dev, native_mode);
7193 
7194 	if (mode == NULL)
7195 		return NULL;
7196 
7197 	mode->hdisplay = hdisplay;
7198 	mode->vdisplay = vdisplay;
7199 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7200 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7201 
7202 	return mode;
7204 }
7205 
7206 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7207 						 struct drm_connector *connector)
7208 {
7209 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7210 	struct drm_display_mode *mode = NULL;
7211 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7212 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7213 				to_amdgpu_dm_connector(connector);
7214 	int i;
7215 	int n;
7216 	struct mode_size {
7217 		char name[DRM_DISPLAY_MODE_LEN];
7218 		int w;
7219 		int h;
7220 	} common_modes[] = {
7221 		{  "640x480",  640,  480},
7222 		{  "800x600",  800,  600},
7223 		{ "1024x768", 1024,  768},
7224 		{ "1280x720", 1280,  720},
7225 		{ "1280x800", 1280,  800},
7226 		{"1280x1024", 1280, 1024},
7227 		{ "1440x900", 1440,  900},
7228 		{"1680x1050", 1680, 1050},
7229 		{"1600x1200", 1600, 1200},
7230 		{"1920x1080", 1920, 1080},
7231 		{"1920x1200", 1920, 1200}
7232 	};
7233 
7234 	n = ARRAY_SIZE(common_modes);
7235 
7236 	for (i = 0; i < n; i++) {
7237 		struct drm_display_mode *curmode = NULL;
7238 		bool mode_existed = false;
7239 
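		/* Skip modes larger than the native mode, and the native mode itself. */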
7240 		if (common_modes[i].w > native_mode->hdisplay ||
7241 		    common_modes[i].h > native_mode->vdisplay ||
7242 		   (common_modes[i].w == native_mode->hdisplay &&
7243 		    common_modes[i].h == native_mode->vdisplay))
7244 			continue;
7245 
7246 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7247 			if (common_modes[i].w == curmode->hdisplay &&
7248 			    common_modes[i].h == curmode->vdisplay) {
7249 				mode_existed = true;
7250 				break;
7251 			}
7252 		}
7253 
7254 		if (mode_existed)
7255 			continue;
7256 
		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;
		drm_mode_probed_add(connector, mode);
7261 		amdgpu_dm_connector->num_modes++;
7262 	}
7263 }
7264 
7265 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7266 					      struct edid *edid)
7267 {
7268 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7269 			to_amdgpu_dm_connector(connector);
7270 
7271 	if (edid) {
7272 		/* empty probed_modes */
7273 		INIT_LIST_HEAD(&connector->probed_modes);
7274 		amdgpu_dm_connector->num_modes =
7275 				drm_add_edid_modes(connector, edid);
7276 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can contain more
		 * than one preferred mode. Modes later in the probed list can
		 * have a higher, preferred resolution: for example, 3840x2160
		 * in the base EDID preferred timing and 4096x2160 as the
		 * preferred resolution in a later DID extension block.
		 */
7285 		drm_mode_sort(&connector->probed_modes);
7286 		amdgpu_dm_get_native_mode(connector);
7287 
7288 		/* Freesync capabilities are reset by calling
7289 		 * drm_add_edid_modes() and need to be
7290 		 * restored here.
7291 		 */
7292 		amdgpu_dm_update_freesync_caps(connector, edid);
7293 	} else {
7294 		amdgpu_dm_connector->num_modes = 0;
7295 	}
7296 }
7297 
7298 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7299 			      struct drm_display_mode *mode)
7300 {
7301 	struct drm_display_mode *m;
7302 
7303 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7304 		if (drm_mode_equal(m, mode))
7305 			return true;
7306 	}
7307 
7308 	return false;
7309 }
7310 
7311 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7312 {
7313 	const struct drm_display_mode *m;
7314 	struct drm_display_mode *new_mode;
7315 	uint i;
7316 	uint32_t new_modes_count = 0;
7317 
	/* Standard FPS values
	 *
	 * 23.976     - TV/NTSC
	 * 24         - Cinema
	 * 25         - TV/PAL
	 * 29.97      - TV/NTSC
	 * 30         - TV/NTSC
	 * 48         - Cinema HFR
	 * 50         - TV/PAL
	 * 60         - Commonly used
	 * 48, 72, 96 - Multiples of 24
	 */
7330 	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7331 					 48000, 50000, 60000, 72000, 96000 };
7332 
7333 	/*
7334 	 * Find mode with highest refresh rate with the same resolution
7335 	 * as the preferred mode. Some monitors report a preferred mode
7336 	 * with lower resolution than the highest refresh rate supported.
7337 	 */
7338 
7339 	m = get_highest_refresh_rate_mode(aconnector, true);
7340 	if (!m)
7341 		return 0;
7342 
7343 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7344 		uint64_t target_vtotal, target_vtotal_diff;
7345 		uint64_t num, den;
7346 
7347 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7348 			continue;
7349 
7350 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7351 		    common_rates[i] > aconnector->max_vfreq * 1000)
7352 			continue;
7353 
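		/*
		 * Compute the vtotal that hits the target rate at this mode's
		 * pixel clock: with the clock in kHz and rates in mHz,
		 * vtotal = (clock * 10^6) / (rate * htotal).
		 */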
7354 		num = (unsigned long long)m->clock * 1000 * 1000;
7355 		den = common_rates[i] * (unsigned long long)m->htotal;
7356 		target_vtotal = div_u64(num, den);
7357 		target_vtotal_diff = target_vtotal - m->vtotal;
7358 
7359 		/* Check for illegal modes */
7360 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7361 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7362 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7363 			continue;
7364 
7365 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7366 		if (!new_mode)
7367 			goto out;
7368 
7369 		new_mode->vtotal += (u16)target_vtotal_diff;
7370 		new_mode->vsync_start += (u16)target_vtotal_diff;
7371 		new_mode->vsync_end += (u16)target_vtotal_diff;
7372 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7373 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7374 
7375 		if (!is_duplicate_mode(aconnector, new_mode)) {
7376 			drm_mode_probed_add(&aconnector->base, new_mode);
7377 			new_modes_count += 1;
7378 		} else
7379 			drm_mode_destroy(aconnector->base.dev, new_mode);
7380 	}
7381  out:
7382 	return new_modes_count;
7383 }
7384 
7385 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7386 						   struct edid *edid)
7387 {
7388 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7389 		to_amdgpu_dm_connector(connector);
7390 
7391 	if (!(amdgpu_freesync_vid_mode && edid))
7392 		return;
7393 
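	/* Only add fixed refresh rate modes when the VRR range spans more than 10 Hz. */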
7394 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7395 		amdgpu_dm_connector->num_modes +=
7396 			add_fs_modes(amdgpu_dm_connector);
7397 }
7398 
7399 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7400 {
7401 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7402 			to_amdgpu_dm_connector(connector);
7403 	struct drm_encoder *encoder;
7404 	struct edid *edid = amdgpu_dm_connector->edid;
7405 
7406 	encoder = amdgpu_dm_connector_to_encoder(connector);
7407 
7408 	if (!drm_edid_is_valid(edid)) {
7409 		amdgpu_dm_connector->num_modes =
7410 				drm_add_modes_noedid(connector, 640, 480);
7411 	} else {
7412 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7413 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7414 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7415 	}
7416 	amdgpu_dm_fbc_init(connector);
7417 
7418 	return amdgpu_dm_connector->num_modes;
7419 }
7420 
7421 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7422 				     struct amdgpu_dm_connector *aconnector,
7423 				     int connector_type,
7424 				     struct dc_link *link,
7425 				     int link_index)
7426 {
7427 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7428 
7429 	/*
7430 	 * Some of the properties below require access to state, like bpc.
7431 	 * Allocate some default initial connector state with our reset helper.
7432 	 */
7433 	if (aconnector->base.funcs->reset)
7434 		aconnector->base.funcs->reset(&aconnector->base);
7435 
7436 	aconnector->connector_id = link_index;
7437 	aconnector->dc_link = link;
7438 	aconnector->base.interlace_allowed = false;
7439 	aconnector->base.doublescan_allowed = false;
7440 	aconnector->base.stereo_allowed = false;
7441 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7442 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7443 	aconnector->audio_inst = -1;
7444 	mutex_init(&aconnector->hpd_lock);
7445 
	/*
	 * Configure HPD support: connector->polled defaults to 0, which
	 * means HPD hot plug is not supported.
	 */
7450 	switch (connector_type) {
7451 	case DRM_MODE_CONNECTOR_HDMIA:
7452 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
7455 		break;
7456 	case DRM_MODE_CONNECTOR_DisplayPort:
7457 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
7460 		break;
7461 	case DRM_MODE_CONNECTOR_DVID:
7462 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7463 		break;
7464 	default:
7465 		break;
7466 	}
7467 
7468 	drm_object_attach_property(&aconnector->base.base,
7469 				dm->ddev->mode_config.scaling_mode_property,
7470 				DRM_MODE_SCALE_NONE);
7471 
7472 	drm_object_attach_property(&aconnector->base.base,
7473 				adev->mode_info.underscan_property,
7474 				UNDERSCAN_OFF);
7475 	drm_object_attach_property(&aconnector->base.base,
7476 				adev->mode_info.underscan_hborder_property,
7477 				0);
7478 	drm_object_attach_property(&aconnector->base.base,
7479 				adev->mode_info.underscan_vborder_property,
7480 				0);
7481 
7482 	if (!aconnector->mst_port)
7483 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7484 
7485 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7486 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7487 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7488 
7489 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7490 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7491 		drm_object_attach_property(&aconnector->base.base,
7492 				adev->mode_info.abm_level_property, 0);
7493 	}
7494 
7495 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7496 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7497 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7498 		drm_object_attach_property(
7499 			&aconnector->base.base,
7500 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7501 
7502 		if (!aconnector->mst_port)
7503 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7504 
7505 #ifdef CONFIG_DRM_AMD_DC_HDCP
7506 		if (adev->dm.hdcp_workqueue)
7507 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7508 #endif
7509 	}
7510 }
7511 
7512 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7513 			      struct i2c_msg *msgs, int num)
7514 {
7515 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7516 	struct ddc_service *ddc_service = i2c->ddc_service;
7517 	struct i2c_command cmd;
7518 	int i;
7519 	int result = -EIO;
7520 
7521 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7522 
7523 	if (!cmd.payloads)
7524 		return result;
7525 
7526 	cmd.number_of_payloads = num;
7527 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7528 	cmd.speed = 100;
7529 
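	/* Translate each i2c_msg into a DC i2c payload; I2C_M_RD marks a read. */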
7530 	for (i = 0; i < num; i++) {
7531 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7532 		cmd.payloads[i].address = msgs[i].addr;
7533 		cmd.payloads[i].length = msgs[i].len;
7534 		cmd.payloads[i].data = msgs[i].buf;
7535 	}
7536 
7537 	if (dc_submit_i2c(
7538 			ddc_service->ctx->dc,
7539 			ddc_service->ddc_pin->hw_info.ddc_channel,
7540 			&cmd))
7541 		result = num;
7542 
7543 	kfree(cmd.payloads);
7544 	return result;
7545 }
7546 
7547 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7548 {
7549 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7550 }
7551 
7552 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7553 	.master_xfer = amdgpu_dm_i2c_xfer,
7554 	.functionality = amdgpu_dm_i2c_func,
7555 };
7556 
7557 static struct amdgpu_i2c_adapter *
7558 create_i2c(struct ddc_service *ddc_service,
7559 	   int link_index,
7560 	   int *res)
7561 {
7562 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7563 	struct amdgpu_i2c_adapter *i2c;
7564 
7565 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7566 	if (!i2c)
7567 		return NULL;
7568 	i2c->base.owner = THIS_MODULE;
7569 	i2c->base.class = I2C_CLASS_DDC;
7570 	i2c->base.dev.parent = &adev->pdev->dev;
7571 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7572 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7573 	i2c_set_adapdata(&i2c->base, i2c);
7574 	i2c->ddc_service = ddc_service;
7575 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7576 
7577 	return i2c;
7578 }
7579 
7580 
7581 /*
7582  * Note: this function assumes that dc_link_detect() was called for the
7583  * dc_link which will be represented by this aconnector.
7584  */
7585 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7586 				    struct amdgpu_dm_connector *aconnector,
7587 				    uint32_t link_index,
7588 				    struct amdgpu_encoder *aencoder)
7589 {
7590 	int res = 0;
7591 	int connector_type;
7592 	struct dc *dc = dm->dc;
7593 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7594 	struct amdgpu_i2c_adapter *i2c;
7595 
7596 	link->priv = aconnector;
7597 
7598 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7599 
7600 	i2c = create_i2c(link->ddc, link->link_index, &res);
7601 	if (!i2c) {
7602 		DRM_ERROR("Failed to create i2c adapter data\n");
7603 		return -ENOMEM;
7604 	}
7605 
7606 	aconnector->i2c = i2c;
7607 	res = i2c_add_adapter(&i2c->base);
7608 
7609 	if (res) {
7610 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7611 		goto out_free;
7612 	}
7613 
7614 	connector_type = to_drm_connector_type(link->connector_signal);
7615 
7616 	res = drm_connector_init_with_ddc(
7617 			dm->ddev,
7618 			&aconnector->base,
7619 			&amdgpu_dm_connector_funcs,
7620 			connector_type,
7621 			&i2c->base);
7622 
7623 	if (res) {
7624 		DRM_ERROR("connector_init failed\n");
7625 		aconnector->connector_id = -1;
7626 		goto out_free;
7627 	}
7628 
7629 	drm_connector_helper_add(
7630 			&aconnector->base,
7631 			&amdgpu_dm_connector_helper_funcs);
7632 
7633 	amdgpu_dm_connector_init_helper(
7634 		dm,
7635 		aconnector,
7636 		connector_type,
7637 		link,
7638 		link_index);
7639 
7640 	drm_connector_attach_encoder(
7641 		&aconnector->base, &aencoder->base);
7642 
7643 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7644 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7645 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7646 
7647 out_free:
7648 	if (res) {
7649 		kfree(i2c);
7650 		aconnector->i2c = NULL;
7651 	}
7652 	return res;
7653 }
7654 
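/* Build an encoder possible_crtcs mask covering every CRTC on the ASIC (at most six). */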
7655 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7656 {
7657 	switch (adev->mode_info.num_crtc) {
7658 	case 1:
7659 		return 0x1;
7660 	case 2:
7661 		return 0x3;
7662 	case 3:
7663 		return 0x7;
7664 	case 4:
7665 		return 0xf;
7666 	case 5:
7667 		return 0x1f;
7668 	case 6:
7669 	default:
7670 		return 0x3f;
7671 	}
7672 }
7673 
7674 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7675 				  struct amdgpu_encoder *aencoder,
7676 				  uint32_t link_index)
7677 {
7678 	struct amdgpu_device *adev = drm_to_adev(dev);
7679 
7680 	int res = drm_encoder_init(dev,
7681 				   &aencoder->base,
7682 				   &amdgpu_dm_encoder_funcs,
7683 				   DRM_MODE_ENCODER_TMDS,
7684 				   NULL);
7685 
7686 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7687 
7688 	if (!res)
7689 		aencoder->encoder_id = link_index;
7690 	else
7691 		aencoder->encoder_id = -1;
7692 
7693 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7694 
7695 	return res;
7696 }
7697 
7698 static void manage_dm_interrupts(struct amdgpu_device *adev,
7699 				 struct amdgpu_crtc *acrtc,
7700 				 bool enable)
7701 {
7702 	/*
7703 	 * We have no guarantee that the frontend index maps to the same
7704 	 * backend index - some even map to more than one.
7705 	 *
7706 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7707 	 */
7708 	int irq_type =
7709 		amdgpu_display_crtc_idx_to_irq_type(
7710 			adev,
7711 			acrtc->crtc_id);
7712 
7713 	if (enable) {
7714 		drm_crtc_vblank_on(&acrtc->base);
7715 		amdgpu_irq_get(
7716 			adev,
7717 			&adev->pageflip_irq,
7718 			irq_type);
7719 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7720 		amdgpu_irq_get(
7721 			adev,
7722 			&adev->vline0_irq,
7723 			irq_type);
7724 #endif
7725 	} else {
7726 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7727 		amdgpu_irq_put(
7728 			adev,
7729 			&adev->vline0_irq,
7730 			irq_type);
7731 #endif
7732 		amdgpu_irq_put(
7733 			adev,
7734 			&adev->pageflip_irq,
7735 			irq_type);
7736 		drm_crtc_vblank_off(&acrtc->base);
7737 	}
7738 }
7739 
7740 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7741 				      struct amdgpu_crtc *acrtc)
7742 {
7743 	int irq_type =
7744 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7745 
	/*
	 * Read the current IRQ state and forcibly reapply that setting to
	 * the hardware.
	 */
7750 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7751 }
7752 
7753 static bool
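/*
 * Return true if the scaling mode or underscan settings changed enough to
 * require a stream update.
 */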
7754 is_scaling_state_different(const struct dm_connector_state *dm_state,
7755 			   const struct dm_connector_state *old_dm_state)
7756 {
7757 	if (dm_state->scaling != old_dm_state->scaling)
7758 		return true;
7759 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7760 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7761 			return true;
7762 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7763 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7764 			return true;
7765 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7766 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7767 		return true;
7768 	return false;
7769 }
7770 
7771 #ifdef CONFIG_DRM_AMD_DC_HDCP
7772 static bool is_content_protection_different(struct drm_connector_state *state,
7773 					    const struct drm_connector_state *old_state,
7774 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7775 {
7776 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7777 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7778 
7779 	/* Handle: Type0/1 change */
7780 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7781 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7782 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7783 		return true;
7784 	}
7785 
	/* CP is being re-enabled, ignore this
7787 	 *
7788 	 * Handles:	ENABLED -> DESIRED
7789 	 */
7790 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7791 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7792 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7793 		return false;
7794 	}
7795 
7796 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7797 	 *
7798 	 * Handles:	UNDESIRED -> ENABLED
7799 	 */
7800 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7801 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7802 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7803 
	/* Check if something is connected/enabled; otherwise we would start
	 * HDCP with nothing connected, e.g. on hot-plug, headless S3 or DPMS.
7806 	 *
7807 	 * Handles:	DESIRED -> DESIRED (Special case)
7808 	 */
7809 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7810 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7811 		dm_con_state->update_hdcp = false;
7812 		return true;
7813 	}
7814 
7815 	/*
7816 	 * Handles:	UNDESIRED -> UNDESIRED
7817 	 *		DESIRED -> DESIRED
7818 	 *		ENABLED -> ENABLED
7819 	 */
7820 	if (old_state->content_protection == state->content_protection)
7821 		return false;
7822 
7823 	/*
7824 	 * Handles:	UNDESIRED -> DESIRED
7825 	 *		DESIRED -> UNDESIRED
7826 	 *		ENABLED -> UNDESIRED
7827 	 */
7828 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7829 		return true;
7830 
7831 	/*
7832 	 * Handles:	DESIRED -> ENABLED
7833 	 */
7834 	return false;
7835 }
7836 
7837 #endif
7838 static void remove_stream(struct amdgpu_device *adev,
7839 			  struct amdgpu_crtc *acrtc,
7840 			  struct dc_stream_state *stream)
7841 {
	/* This is the update/modeset case: detach the CRTC from its OTG and mark it disabled. */
7843 
7844 	acrtc->otg_inst = -1;
7845 	acrtc->enabled = false;
7846 }
7847 
7848 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7849 			       struct dc_cursor_position *position)
7850 {
7851 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7852 	int x, y;
7853 	int xorigin = 0, yorigin = 0;
7854 
7855 	if (!crtc || !plane->state->fb)
7856 		return 0;
7857 
7858 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7859 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7860 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7861 			  __func__,
7862 			  plane->state->crtc_w,
7863 			  plane->state->crtc_h);
7864 		return -EINVAL;
7865 	}
7866 
7867 	x = plane->state->crtc_x;
7868 	y = plane->state->crtc_y;
7869 
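	/* Cursor entirely off screen: leave position->enable false so it stays hidden. */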
7870 	if (x <= -amdgpu_crtc->max_cursor_width ||
7871 	    y <= -amdgpu_crtc->max_cursor_height)
7872 		return 0;
7873 
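	/* Partially off screen: clamp the position to zero and shift the hotspot instead. */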
7874 	if (x < 0) {
7875 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7876 		x = 0;
7877 	}
7878 	if (y < 0) {
7879 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7880 		y = 0;
7881 	}
7882 	position->enable = true;
7883 	position->translate_by_source = true;
7884 	position->x = x;
7885 	position->y = y;
7886 	position->x_hotspot = xorigin;
7887 	position->y_hotspot = yorigin;
7888 
7889 	return 0;
7890 }
7891 
7892 static void handle_cursor_update(struct drm_plane *plane,
7893 				 struct drm_plane_state *old_plane_state)
7894 {
7895 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7896 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7897 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7898 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7899 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7900 	uint64_t address = afb ? afb->address : 0;
7901 	struct dc_cursor_position position = {0};
7902 	struct dc_cursor_attributes attributes;
7903 	int ret;
7904 
7905 	if (!plane->state->fb && !old_plane_state->fb)
7906 		return;
7907 
7908 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
7909 		      __func__,
7910 		      amdgpu_crtc->crtc_id,
7911 		      plane->state->crtc_w,
7912 		      plane->state->crtc_h);
7913 
7914 	ret = get_cursor_position(plane, crtc, &position);
7915 	if (ret)
7916 		return;
7917 
7918 	if (!position.enable) {
7919 		/* turn off cursor */
7920 		if (crtc_state && crtc_state->stream) {
7921 			mutex_lock(&adev->dm.dc_lock);
7922 			dc_stream_set_cursor_position(crtc_state->stream,
7923 						      &position);
7924 			mutex_unlock(&adev->dm.dc_lock);
7925 		}
7926 		return;
7927 	}
7928 
7929 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7930 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7931 
7932 	memset(&attributes, 0, sizeof(attributes));
7933 	attributes.address.high_part = upper_32_bits(address);
7934 	attributes.address.low_part  = lower_32_bits(address);
7935 	attributes.width             = plane->state->crtc_w;
7936 	attributes.height            = plane->state->crtc_h;
7937 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7938 	attributes.rotation_angle    = 0;
7939 	attributes.attribute_flags.value = 0;
7940 
7941 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7942 
7943 	if (crtc_state->stream) {
7944 		mutex_lock(&adev->dm.dc_lock);
7945 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7946 							 &attributes))
7947 			DRM_ERROR("DC failed to set cursor attributes\n");
7948 
7949 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7950 						   &position))
7951 			DRM_ERROR("DC failed to set cursor position\n");
7952 		mutex_unlock(&adev->dm.dc_lock);
7953 	}
7954 }
7955 
7956 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7957 {
7959 	assert_spin_locked(&acrtc->base.dev->event_lock);
7960 	WARN_ON(acrtc->event);
7961 
7962 	acrtc->event = acrtc->base.state->event;
7963 
7964 	/* Set the flip status */
7965 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7966 
7967 	/* Mark this event as consumed */
7968 	acrtc->base.state->event = NULL;
7969 
7970 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7971 		     acrtc->crtc_id);
7972 }
7973 
7974 static void update_freesync_state_on_stream(
7975 	struct amdgpu_display_manager *dm,
7976 	struct dm_crtc_state *new_crtc_state,
7977 	struct dc_stream_state *new_stream,
7978 	struct dc_plane_state *surface,
7979 	u32 flip_timestamp_in_us)
7980 {
7981 	struct mod_vrr_params vrr_params;
7982 	struct dc_info_packet vrr_infopacket = {0};
7983 	struct amdgpu_device *adev = dm->adev;
7984 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7985 	unsigned long flags;
7986 	bool pack_sdp_v1_3 = false;
7987 
7988 	if (!new_stream)
7989 		return;
7990 
7991 	/*
7992 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7993 	 * For now it's sufficient to just guard against these conditions.
7994 	 */
7995 
7996 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7997 		return;
7998 
7999 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
8001 
8002 	if (surface) {
8003 		mod_freesync_handle_preflip(
8004 			dm->freesync_module,
8005 			surface,
8006 			new_stream,
8007 			flip_timestamp_in_us,
8008 			&vrr_params);
8009 
8010 		if (adev->family < AMDGPU_FAMILY_AI &&
8011 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8012 			mod_freesync_handle_v_update(dm->freesync_module,
8013 						     new_stream, &vrr_params);
8014 
8015 			/* Need to call this before the frame ends. */
8016 			dc_stream_adjust_vmin_vmax(dm->dc,
8017 						   new_crtc_state->stream,
8018 						   &vrr_params.adjust);
8019 		}
8020 	}
8021 
8022 	mod_freesync_build_vrr_infopacket(
8023 		dm->freesync_module,
8024 		new_stream,
8025 		&vrr_params,
8026 		PACKET_TYPE_VRR,
8027 		TRANSFER_FUNC_UNKNOWN,
8028 		&vrr_infopacket,
8029 		pack_sdp_v1_3);
8030 
8031 	new_crtc_state->freesync_timing_changed |=
8032 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8033 			&vrr_params.adjust,
8034 			sizeof(vrr_params.adjust)) != 0);
8035 
8036 	new_crtc_state->freesync_vrr_info_changed |=
8037 		(memcmp(&new_crtc_state->vrr_infopacket,
8038 			&vrr_infopacket,
8039 			sizeof(vrr_infopacket)) != 0);
8040 
8041 	acrtc->dm_irq_params.vrr_params = vrr_params;
8042 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8043 
8044 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8045 	new_stream->vrr_infopacket = vrr_infopacket;
8046 
8047 	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8049 			      new_crtc_state->base.crtc->base.id,
8050 			      (int)new_crtc_state->base.vrr_enabled,
8051 			      (int)vrr_params.state);
8052 
8053 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8054 }
8055 
8056 static void update_stream_irq_parameters(
8057 	struct amdgpu_display_manager *dm,
8058 	struct dm_crtc_state *new_crtc_state)
8059 {
8060 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8061 	struct mod_vrr_params vrr_params;
8062 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8063 	struct amdgpu_device *adev = dm->adev;
8064 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8065 	unsigned long flags;
8066 
8067 	if (!new_stream)
8068 		return;
8069 
8070 	/*
8071 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8072 	 * For now it's sufficient to just guard against these conditions.
8073 	 */
8074 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8075 		return;
8076 
8077 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8078 	vrr_params = acrtc->dm_irq_params.vrr_params;
8079 
8080 	if (new_crtc_state->vrr_supported &&
8081 	    config.min_refresh_in_uhz &&
8082 	    config.max_refresh_in_uhz) {
8083 		/*
8084 		 * if freesync compatible mode was set, config.state will be set
8085 		 * in atomic check
8086 		 */
8087 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8088 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8089 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8090 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8091 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8092 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8093 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8094 		} else {
8095 			config.state = new_crtc_state->base.vrr_enabled ?
8096 						     VRR_STATE_ACTIVE_VARIABLE :
8097 						     VRR_STATE_INACTIVE;
8098 		}
8099 	} else {
8100 		config.state = VRR_STATE_UNSUPPORTED;
8101 	}
8102 
8103 	mod_freesync_build_vrr_params(dm->freesync_module,
8104 				      new_stream,
8105 				      &config, &vrr_params);
8106 
8107 	new_crtc_state->freesync_timing_changed |=
8108 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8109 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8110 
8111 	new_crtc_state->freesync_config = config;
8112 	/* Copy state for access from DM IRQ handler */
8113 	acrtc->dm_irq_params.freesync_config = config;
8114 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8115 	acrtc->dm_irq_params.vrr_params = vrr_params;
8116 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8117 }
8118 
8119 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8120 					    struct dm_crtc_state *new_state)
8121 {
8122 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8123 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8124 
8125 	if (!old_vrr_active && new_vrr_active) {
8126 		/* Transition VRR inactive -> active:
8127 		 * While VRR is active, we must not disable vblank irq, as a
8128 		 * reenable after disable would compute bogus vblank/pflip
8129 		 * timestamps if it likely happened inside display front-porch.
8130 		 *
8131 		 * We also need vupdate irq for the actual core vblank handling
8132 		 * at end of vblank.
8133 		 */
8134 		dm_set_vupdate_irq(new_state->base.crtc, true);
8135 		drm_crtc_vblank_get(new_state->base.crtc);
8136 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8137 				 __func__, new_state->base.crtc->base.id);
8138 	} else if (old_vrr_active && !new_vrr_active) {
8139 		/* Transition VRR active -> inactive:
8140 		 * Allow vblank irq disable again for fixed refresh rate.
8141 		 */
8142 		dm_set_vupdate_irq(new_state->base.crtc, false);
8143 		drm_crtc_vblank_put(new_state->base.crtc);
8144 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8145 				 __func__, new_state->base.crtc->base.id);
8146 	}
8147 }
8148 
8149 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8150 {
8151 	struct drm_plane *plane;
8152 	struct drm_plane_state *old_plane_state, *new_plane_state;
8153 	int i;
8154 
8155 	/*
8156 	 * TODO: Make this per-stream so we don't issue redundant updates for
8157 	 * commits with multiple streams.
8158 	 */
8159 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8160 				       new_plane_state, i)
8161 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8162 			handle_cursor_update(plane, old_plane_state);
8163 }
8164 
8165 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8166 				    struct dc_state *dc_state,
8167 				    struct drm_device *dev,
8168 				    struct amdgpu_display_manager *dm,
8169 				    struct drm_crtc *pcrtc,
8170 				    bool wait_for_vblank)
8171 {
8172 	uint32_t i;
8173 	uint64_t timestamp_ns;
8174 	struct drm_plane *plane;
8175 	struct drm_plane_state *old_plane_state, *new_plane_state;
8176 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8177 	struct drm_crtc_state *new_pcrtc_state =
8178 			drm_atomic_get_new_crtc_state(state, pcrtc);
8179 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8180 	struct dm_crtc_state *dm_old_crtc_state =
8181 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8182 	int planes_count = 0, vpos, hpos;
8183 	long r;
8184 	unsigned long flags;
8185 	struct amdgpu_bo *abo;
8186 	uint32_t target_vblank, last_flip_vblank;
8187 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8188 	bool pflip_present = false;
8189 	struct {
8190 		struct dc_surface_update surface_updates[MAX_SURFACES];
8191 		struct dc_plane_info plane_infos[MAX_SURFACES];
8192 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8193 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8194 		struct dc_stream_update stream_update;
8195 	} *bundle;
8196 
8197 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8198 
8199 	if (!bundle) {
8200 		dm_error("Failed to allocate update bundle\n");
8201 		goto cleanup;
8202 	}
8203 
8204 	/*
8205 	 * Disable the cursor first if we're disabling all the planes.
8206 	 * It'll remain on the screen after the planes are re-enabled
8207 	 * if we don't.
8208 	 */
8209 	if (acrtc_state->active_planes == 0)
8210 		amdgpu_dm_commit_cursors(state);
8211 
8212 	/* update planes when needed */
8213 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8214 		struct drm_crtc *crtc = new_plane_state->crtc;
8215 		struct drm_crtc_state *new_crtc_state;
8216 		struct drm_framebuffer *fb = new_plane_state->fb;
8217 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8218 		bool plane_needs_flip;
8219 		struct dc_plane_state *dc_plane;
8220 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8221 
8222 		/* Cursor plane is handled after stream updates */
8223 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8224 			continue;
8225 
8226 		if (!fb || !crtc || pcrtc != crtc)
8227 			continue;
8228 
8229 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8230 		if (!new_crtc_state->active)
8231 			continue;
8232 
8233 		dc_plane = dm_new_plane_state->dc_state;
8234 
8235 		bundle->surface_updates[planes_count].surface = dc_plane;
8236 		if (new_pcrtc_state->color_mgmt_changed) {
8237 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8238 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8239 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8240 		}
8241 
8242 		fill_dc_scaling_info(new_plane_state,
8243 				     &bundle->scaling_infos[planes_count]);
8244 
8245 		bundle->surface_updates[planes_count].scaling_info =
8246 			&bundle->scaling_infos[planes_count];
8247 
8248 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8249 
8250 		pflip_present = pflip_present || plane_needs_flip;
8251 
8252 		if (!plane_needs_flip) {
8253 			planes_count += 1;
8254 			continue;
8255 		}
8256 
8257 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8258 
8259 		/*
8260 		 * Wait for all fences on this FB. Do limited wait to avoid
8261 		 * deadlock during GPU reset when this fence will not signal
8262 		 * but we hold reservation lock for the BO.
8263 		 */
8264 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8265 							false,
8266 							msecs_to_jiffies(5000));
8267 		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!\n");
8269 
8270 		fill_dc_plane_info_and_addr(
8271 			dm->adev, new_plane_state,
8272 			afb->tiling_flags,
8273 			&bundle->plane_infos[planes_count],
8274 			&bundle->flip_addrs[planes_count].address,
8275 			afb->tmz_surface, false);
8276 
8277 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8278 				 new_plane_state->plane->index,
8279 				 bundle->plane_infos[planes_count].dcc.enable);
8280 
8281 		bundle->surface_updates[planes_count].plane_info =
8282 			&bundle->plane_infos[planes_count];
8283 
8284 		/*
8285 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
8287 		 */
8288 		bundle->flip_addrs[planes_count].flip_immediate =
8289 			crtc->state->async_flip &&
8290 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8291 
8292 		timestamp_ns = ktime_get_ns();
8293 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8294 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8295 		bundle->surface_updates[planes_count].surface = dc_plane;
8296 
8297 		if (!bundle->surface_updates[planes_count].surface) {
8298 			DRM_ERROR("No surface for CRTC: id=%d\n",
8299 					acrtc_attach->crtc_id);
8300 			continue;
8301 		}
8302 
8303 		if (plane == pcrtc->primary)
8304 			update_freesync_state_on_stream(
8305 				dm,
8306 				acrtc_state,
8307 				acrtc_state->stream,
8308 				dc_plane,
8309 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8310 
8311 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8312 				 __func__,
8313 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8314 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8315 
8316 		planes_count += 1;
8317 
8318 	}
8319 
8320 	if (pflip_present) {
8321 		if (!vrr_active) {
8322 			/* Use old throttling in non-vrr fixed refresh rate mode
8323 			 * to keep flip scheduling based on target vblank counts
8324 			 * working in a backwards compatible way, e.g., for
8325 			 * clients using the GLX_OML_sync_control extension or
8326 			 * DRI3/Present extension with defined target_msc.
8327 			 */
8328 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
8331 			/* For variable refresh rate mode only:
8332 			 * Get vblank of last completed flip to avoid > 1 vrr
8333 			 * flips per video frame by use of throttling, but allow
8334 			 * flip programming anywhere in the possibly large
8335 			 * variable vrr vblank interval for fine-grained flip
8336 			 * timing control and more opportunity to avoid stutter
8337 			 * on late submission of flips.
8338 			 */
8339 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8340 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8341 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8342 		}
8343 
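		/* wait_for_vblank is 0 or 1, so this throttles at most one frame past the last completed flip. */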
8344 		target_vblank = last_flip_vblank + wait_for_vblank;
8345 
8346 		/*
8347 		 * Wait until we're out of the vertical blank period before the one
8348 		 * targeted by the flip
8349 		 */
8350 		while ((acrtc_attach->enabled &&
8351 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8352 							    0, &vpos, &hpos, NULL,
8353 							    NULL, &pcrtc->hwmode)
8354 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8355 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8356 			(int)(target_vblank -
8357 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8358 			usleep_range(1000, 1100);
8359 		}
8360 
		/*
8362 		 * Prepare the flip event for the pageflip interrupt to handle.
8363 		 *
8364 		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8366 		 * from 0 -> n planes we have to skip a hardware generated event
8367 		 * and rely on sending it from software.
8368 		 */
8369 		if (acrtc_attach->base.state->event &&
8370 		    acrtc_state->active_planes > 0) {
8371 			drm_crtc_vblank_get(pcrtc);
8372 
8373 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8374 
8375 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8376 			prepare_flip_isr(acrtc_attach);
8377 
8378 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8379 		}
8380 
8381 		if (acrtc_state->stream) {
8382 			if (acrtc_state->freesync_vrr_info_changed)
8383 				bundle->stream_update.vrr_infopacket =
8384 					&acrtc_state->stream->vrr_infopacket;
8385 		}
8386 	}
8387 
8388 	/* Update the planes if changed or disable if we don't have any. */
8389 	if ((planes_count || acrtc_state->active_planes == 0) &&
8390 		acrtc_state->stream) {
8391 		bundle->stream_update.stream = acrtc_state->stream;
8392 		if (new_pcrtc_state->mode_changed) {
8393 			bundle->stream_update.src = acrtc_state->stream->src;
8394 			bundle->stream_update.dst = acrtc_state->stream->dst;
8395 		}
8396 
8397 		if (new_pcrtc_state->color_mgmt_changed) {
8398 			/*
8399 			 * TODO: This isn't fully correct since we've actually
8400 			 * already modified the stream in place.
8401 			 */
8402 			bundle->stream_update.gamut_remap =
8403 				&acrtc_state->stream->gamut_remap_matrix;
8404 			bundle->stream_update.output_csc_transform =
8405 				&acrtc_state->stream->csc_color_matrix;
8406 			bundle->stream_update.out_transfer_func =
8407 				acrtc_state->stream->out_transfer_func;
8408 		}
8409 
8410 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8411 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8412 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8413 
8414 		/*
8415 		 * If FreeSync state on the stream has changed then we need to
8416 		 * re-adjust the min/max bounds now that DC doesn't handle this
8417 		 * as part of commit.
8418 		 */
8419 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8420 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8421 			dc_stream_adjust_vmin_vmax(
8422 				dm->dc, acrtc_state->stream,
8423 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8424 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8425 		}
8426 		mutex_lock(&dm->dc_lock);
8427 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8428 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8429 			amdgpu_dm_psr_disable(acrtc_state->stream);
8430 
8431 		dc_commit_updates_for_stream(dm->dc,
8432 						     bundle->surface_updates,
8433 						     planes_count,
8434 						     acrtc_state->stream,
8435 						     &bundle->stream_update,
8436 						     dc_state);
8437 
8438 		/**
8439 		 * Enable or disable the interrupts on the backend.
8440 		 *
8441 		 * Most pipes are put into power gating when unused.
8442 		 *
8443 		 * When power gating is enabled on a pipe we lose its
8444 		 * interrupt enablement state once the pipe gets power gated.
8445 		 *
8446 		 * So we need to update the IRQ control state in hardware
8447 		 * whenever the pipe turns on (since it could be previously
8448 		 * power gated) or off (since some pipes can't be power gated
8449 		 * on some ASICs).
8450 		 */
8451 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8452 			dm_update_pflip_irq_state(drm_to_adev(dev),
8453 						  acrtc_attach);
8454 
8455 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8456 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8457 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8458 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8459 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8460 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8461 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8462 			amdgpu_dm_psr_enable(acrtc_state->stream);
8463 		}
8464 
8465 		mutex_unlock(&dm->dc_lock);
8466 	}
8467 
8468 	/*
8469 	 * Update cursor state *after* programming all the planes.
8470 	 * This avoids redundant programming in the case where we're going
8471 	 * to be disabling a single plane - those pipes are being disabled.
8472 	 */
8473 	if (acrtc_state->active_planes)
8474 		amdgpu_dm_commit_cursors(state);
8475 
8476 cleanup:
8477 	kfree(bundle);
8478 }
8479 
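/**
 * amdgpu_dm_commit_audio() - Notify the audio driver about hotplug changes.
 * @dev: The DRM device
 * @state: The atomic state being committed
 *
 * Tears down the audio instance for every connector that lost its CRTC or
 * goes through a modeset, then re-registers the instance for connectors
 * with a newly enabled stream, so ELD notifications reach the audio
 * component in both directions.
 */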
8480 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8481 				   struct drm_atomic_state *state)
8482 {
8483 	struct amdgpu_device *adev = drm_to_adev(dev);
8484 	struct amdgpu_dm_connector *aconnector;
8485 	struct drm_connector *connector;
8486 	struct drm_connector_state *old_con_state, *new_con_state;
8487 	struct drm_crtc_state *new_crtc_state;
8488 	struct dm_crtc_state *new_dm_crtc_state;
8489 	const struct dc_stream_status *status;
8490 	int i, inst;
8491 
8492 	/* Notify audio device removals. */
8493 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8494 		if (old_con_state->crtc != new_con_state->crtc) {
8495 			/* CRTC changes require notification. */
8496 			goto notify;
8497 		}
8498 
8499 		if (!new_con_state->crtc)
8500 			continue;
8501 
8502 		new_crtc_state = drm_atomic_get_new_crtc_state(
8503 			state, new_con_state->crtc);
8504 
8505 		if (!new_crtc_state)
8506 			continue;
8507 
8508 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8509 			continue;
8510 
8511 	notify:
8512 		aconnector = to_amdgpu_dm_connector(connector);
8513 
8514 		mutex_lock(&adev->dm.audio_lock);
8515 		inst = aconnector->audio_inst;
8516 		aconnector->audio_inst = -1;
8517 		mutex_unlock(&adev->dm.audio_lock);
8518 
8519 		amdgpu_dm_audio_eld_notify(adev, inst);
8520 	}
8521 
8522 	/* Notify audio device additions. */
8523 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8524 		if (!new_con_state->crtc)
8525 			continue;
8526 
8527 		new_crtc_state = drm_atomic_get_new_crtc_state(
8528 			state, new_con_state->crtc);
8529 
8530 		if (!new_crtc_state)
8531 			continue;
8532 
8533 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8534 			continue;
8535 
8536 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8537 		if (!new_dm_crtc_state->stream)
8538 			continue;
8539 
8540 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8541 		if (!status)
8542 			continue;
8543 
8544 		aconnector = to_amdgpu_dm_connector(connector);
8545 
8546 		mutex_lock(&adev->dm.audio_lock);
8547 		inst = status->audio_inst;
8548 		aconnector->audio_inst = inst;
8549 		mutex_unlock(&adev->dm.audio_lock);
8550 
8551 		amdgpu_dm_audio_eld_notify(adev, inst);
8552 	}
8553 }
8554 
8555 /**
8556  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8557  * @crtc_state: the DRM CRTC state
8558  * @stream_state: the DC stream state
8559  *
8560  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8561  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8562  */
8563 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8564 						struct dc_stream_state *stream_state)
8565 {
8566 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8567 }
8568 
8569 /**
8570  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8571  * @state: The atomic state to commit
8572  *
8573  * This will tell DC to commit the constructed DC state from atomic_check,
8574  * programming the hardware. Any failures here implies a hardware failure, since
8575  * programming the hardware. Any failure here implies a hardware failure, since
8576  */
8577 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8578 {
8579 	struct drm_device *dev = state->dev;
8580 	struct amdgpu_device *adev = drm_to_adev(dev);
8581 	struct amdgpu_display_manager *dm = &adev->dm;
8582 	struct dm_atomic_state *dm_state;
8583 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8584 	uint32_t i, j;
8585 	struct drm_crtc *crtc;
8586 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8587 	unsigned long flags;
8588 	bool wait_for_vblank = true;
8589 	struct drm_connector *connector;
8590 	struct drm_connector_state *old_con_state, *new_con_state;
8591 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8592 	int crtc_disable_count = 0;
8593 	bool mode_set_reset_required = false;
8594 
8595 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8596 
8597 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8598 
8599 	dm_state = dm_atomic_get_new_state(state);
8600 	if (dm_state && dm_state->context) {
8601 		dc_state = dm_state->context;
8602 	} else {
8603 		/* No state changes, retain current state. */
8604 		dc_state_temp = dc_create_state(dm->dc);
8605 		ASSERT(dc_state_temp);
8606 		dc_state = dc_state_temp;
8607 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8608 	}
8609 
8610 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8611 				       new_crtc_state, i) {
8612 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8613 
8614 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8615 
8616 		if (old_crtc_state->active &&
8617 		    (!new_crtc_state->active ||
8618 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8619 			manage_dm_interrupts(adev, acrtc, false);
8620 			dc_stream_release(dm_old_crtc_state->stream);
8621 		}
8622 	}
8623 
8624 	drm_atomic_helper_calc_timestamping_constants(state);
8625 
8626 	/* update changed items */
8627 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8628 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8629 
8630 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8631 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8632 
8633 		DRM_DEBUG_ATOMIC(
8634 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8635 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
8636 			"connectors_changed:%d\n",
8637 			acrtc->crtc_id,
8638 			new_crtc_state->enable,
8639 			new_crtc_state->active,
8640 			new_crtc_state->planes_changed,
8641 			new_crtc_state->mode_changed,
8642 			new_crtc_state->active_changed,
8643 			new_crtc_state->connectors_changed);
8644 
8645 		/* Disable cursor if disabling crtc */
8646 		if (old_crtc_state->active && !new_crtc_state->active) {
8647 			struct dc_cursor_position position;
8648 
8649 			memset(&position, 0, sizeof(position));
8650 			mutex_lock(&dm->dc_lock);
8651 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8652 			mutex_unlock(&dm->dc_lock);
8653 		}
8654 
8655 		/* Copy all transient state flags into dc state */
8656 		if (dm_new_crtc_state->stream) {
8657 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8658 							    dm_new_crtc_state->stream);
8659 		}
8660 
8661 		/* handles headless hotplug case, updating new_state and
8662 		 * aconnector as needed
8663 		 */
8664 
8665 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8666 
8667 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8668 
8669 			if (!dm_new_crtc_state->stream) {
8670 				/*
8671 				 * This could happen because of issues with
8672 				 * userspace notification delivery.
8673 				 * In this case userspace tries to set a mode on
8674 				 * a display which is in fact disconnected.
8675 				 * dc_sink is NULL on the aconnector in this case.
8676 				 * We expect a mode reset to come soon.
8677 				 *
8678 				 * This can also happen when an unplug is done
8679 				 * during the resume sequence.
8680 				 *
8681 				 * In this case, we want to pretend we still
8682 				 * have a sink to keep the pipe running so that
8683 				 * hw state is consistent with the sw state
8684 				 */
8685 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8686 						__func__, acrtc->base.base.id);
8687 				continue;
8688 			}
8689 
8690 			if (dm_old_crtc_state->stream)
8691 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8692 
8693 			pm_runtime_get_noresume(dev->dev);
8694 
8695 			acrtc->enabled = true;
8696 			acrtc->hw_mode = new_crtc_state->mode;
8697 			crtc->hwmode = new_crtc_state->mode;
8698 			mode_set_reset_required = true;
8699 		} else if (modereset_required(new_crtc_state)) {
8700 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8701 			/* i.e. reset mode */
8702 			if (dm_old_crtc_state->stream)
8703 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8704 
8705 			mode_set_reset_required = true;
8706 		}
8707 	} /* for_each_crtc_in_state() */
8708 
8709 	if (dc_state) {
8710 		/* if there mode set or reset, disable eDP PSR */
8711 		/* If there is a mode set or reset, disable eDP PSR */
8712 			amdgpu_dm_psr_disable_all(dm);
8713 
8714 		dm_enable_per_frame_crtc_master_sync(dc_state);
8715 		mutex_lock(&dm->dc_lock);
8716 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8717 #if defined(CONFIG_DRM_AMD_DC_DCN)
8718 		/* Allow idle optimizations when the vblank count is 0, i.e. display off */
8719 		if (dm->active_vblank_irq_count == 0)
8720 			dc_allow_idle_optimizations(dm->dc, true);
8721 #endif
8722 		mutex_unlock(&dm->dc_lock);
8723 	}
8724 
8725 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8726 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8727 
8728 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8729 
8730 		if (dm_new_crtc_state->stream != NULL) {
8731 			const struct dc_stream_status *status =
8732 					dc_stream_get_status(dm_new_crtc_state->stream);
8733 
8734 			if (!status)
8735 				status = dc_stream_get_status_from_state(dc_state,
8736 									 dm_new_crtc_state->stream);
8737 			if (!status)
8738 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8739 			else
8740 				acrtc->otg_inst = status->primary_otg_inst;
8741 		}
8742 	}
8743 #ifdef CONFIG_DRM_AMD_DC_HDCP
8744 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8745 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8746 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8747 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8748 
8749 		new_crtc_state = NULL;
8750 
8751 		if (acrtc)
8752 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8753 
8754 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8755 
8756 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8757 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8758 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8759 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8760 			dm_new_con_state->update_hdcp = true;
8761 			continue;
8762 		}
8763 
8764 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8765 			hdcp_update_display(
8766 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8767 				new_con_state->hdcp_content_type,
8768 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8769 	}
8770 #endif
8771 
8772 	/* Handle connector state changes */
8773 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8774 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8775 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8776 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8777 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8778 		struct dc_stream_update stream_update;
8779 		struct dc_info_packet hdr_packet;
8780 		struct dc_stream_status *status = NULL;
8781 		bool abm_changed, hdr_changed, scaling_changed;
8782 
8783 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8784 		memset(&stream_update, 0, sizeof(stream_update));
8785 
8786 		if (acrtc) {
8787 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8788 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8789 		}
8790 
8791 		/* Skip any modesets/resets */
8792 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8793 			continue;
8794 
8795 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8796 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8797 
8798 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8799 							     dm_old_con_state);
8800 
8801 		abm_changed = dm_new_crtc_state->abm_level !=
8802 			      dm_old_crtc_state->abm_level;
8803 
8804 		hdr_changed =
8805 			is_hdr_metadata_different(old_con_state, new_con_state);
8806 
8807 		if (!scaling_changed && !abm_changed && !hdr_changed)
8808 			continue;
8809 
8810 		stream_update.stream = dm_new_crtc_state->stream;
8811 		if (scaling_changed) {
8812 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8813 					dm_new_con_state, dm_new_crtc_state->stream);
8814 
8815 			stream_update.src = dm_new_crtc_state->stream->src;
8816 			stream_update.dst = dm_new_crtc_state->stream->dst;
8817 		}
8818 
8819 		if (abm_changed) {
8820 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8821 
8822 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8823 		}
8824 
8825 		if (hdr_changed) {
8826 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8827 			stream_update.hdr_static_metadata = &hdr_packet;
8828 		}
8829 
8830 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8831 		WARN_ON(!status);
8832 		WARN_ON(!status->plane_count);
8833 
8834 		/*
8835 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8836 		 * Here we create an empty update on each plane.
8837 		 * To fix this, DC should permit updating only stream properties.
8838 		 */
8839 		for (j = 0; j < status->plane_count; j++)
8840 			dummy_updates[j].surface = status->plane_states[0];
8841 
8843 		mutex_lock(&dm->dc_lock);
8844 		dc_commit_updates_for_stream(dm->dc,
8845 						     dummy_updates,
8846 						     status->plane_count,
8847 						     dm_new_crtc_state->stream,
8848 						     &stream_update,
8849 						     dc_state);
8850 		mutex_unlock(&dm->dc_lock);
8851 	}
8852 
8853 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8854 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8855 				      new_crtc_state, i) {
8856 		if (old_crtc_state->active && !new_crtc_state->active)
8857 			crtc_disable_count++;
8858 
8859 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8860 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8861 
8862 		/* For freesync config update on crtc state and params for irq */
8863 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8864 
8865 		/* Handle vrr on->off / off->on transitions */
8866 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8867 						dm_new_crtc_state);
8868 	}
8869 
8870 	/**
8871 	 * Enable interrupts for CRTCs that are newly enabled or went through
8872 	 * a modeset. It was intentionally deferred until after the front end
8873 	 * state was modified to wait until the OTG was on and so the IRQ
8874 	 * handlers didn't access stale or invalid state.
8875 	 */
8876 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8877 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8878 #ifdef CONFIG_DEBUG_FS
8879 		bool configure_crc = false;
8880 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
8881 #endif
8882 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8883 
8884 		if (new_crtc_state->active &&
8885 		    (!old_crtc_state->active ||
8886 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8887 			dc_stream_retain(dm_new_crtc_state->stream);
8888 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8889 			manage_dm_interrupts(adev, acrtc, true);
8890 
8891 #ifdef CONFIG_DEBUG_FS
8892 			/**
8893 			 * Frontend may have changed so reapply the CRC capture
8894 			 * settings for the stream.
8895 			 */
8896 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8897 			spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8898 			cur_crc_src = acrtc->dm_irq_params.crc_src;
8899 			spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8900 
8901 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8902 				configure_crc = true;
8903 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8904 				if (amdgpu_dm_crc_window_is_activated(crtc))
8905 					configure_crc = false;
8906 #endif
8907 			}
8908 
8909 			if (configure_crc)
8910 				amdgpu_dm_crtc_configure_crc_source(
8911 					crtc, dm_new_crtc_state, cur_crc_src);
8912 #endif
8913 		}
8914 	}
8915 
8916 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8917 		if (new_crtc_state->async_flip)
8918 			wait_for_vblank = false;
8919 
8920 	/* update planes when needed per crtc*/
8921 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8922 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8923 
8924 		if (dm_new_crtc_state->stream)
8925 			amdgpu_dm_commit_planes(state, dc_state, dev,
8926 						dm, crtc, wait_for_vblank);
8927 	}
8928 
8929 	/* Update audio instances for each connector. */
8930 	amdgpu_dm_commit_audio(dev, state);
8931 
8932 	/*
8933 	 * Send vblank events for all CRTC events not handled in the flip path and
8934 	 * mark them consumed for drm_atomic_helper_commit_hw_done().
8935 	 */
8936 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8937 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8938 
8939 		if (new_crtc_state->event)
8940 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8941 
8942 		new_crtc_state->event = NULL;
8943 	}
8944 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8945 
8946 	/* Signal HW programming completion */
8947 	drm_atomic_helper_commit_hw_done(state);
8948 
8949 	if (wait_for_vblank)
8950 		drm_atomic_helper_wait_for_flip_done(dev, state);
8951 
8952 	drm_atomic_helper_cleanup_planes(dev, state);
8953 
8954 	/* Return the stolen VGA memory back to VRAM */
8955 	if (!adev->mman.keep_stolen_vga_memory)
8956 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8957 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8958 
8959 	/*
8960 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8961 	 * so we can put the GPU into runtime suspend if we're not driving any
8962 	 * displays anymore
8963 	 */
8964 	for (i = 0; i < crtc_disable_count; i++)
8965 		pm_runtime_put_autosuspend(dev->dev);
8966 	pm_runtime_mark_last_busy(dev->dev);
8967 
8968 	if (dc_state_temp)
8969 		dc_release_state(dc_state_temp);
8970 }
8971 
8972 
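/*
 * Build and commit a minimal atomic state that references only the given
 * connector, its CRTC and its primary plane, with mode_changed forced, so
 * the previous display configuration gets reprogrammed without any help
 * from userspace.
 */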
8973 static int dm_force_atomic_commit(struct drm_connector *connector)
8974 {
8975 	int ret = 0;
8976 	struct drm_device *ddev = connector->dev;
8977 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8978 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8979 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8980 	struct drm_connector_state *conn_state;
8981 	struct drm_crtc_state *crtc_state;
8982 	struct drm_plane_state *plane_state;
8983 
8984 	if (!state)
8985 		return -ENOMEM;
8986 
8987 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8988 
8989 	/* Construct an atomic state to restore previous display setting */
8990 
8991 	/*
8992 	 * Attach connectors to drm_atomic_state
8993 	 */
8994 	conn_state = drm_atomic_get_connector_state(state, connector);
8995 
8996 	ret = PTR_ERR_OR_ZERO(conn_state);
8997 	if (ret)
8998 		goto out;
8999 
9000 	/* Attach crtc to drm_atomic_state*/
9001 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9002 
9003 	ret = PTR_ERR_OR_ZERO(crtc_state);
9004 	if (ret)
9005 		goto out;
9006 
9007 	/* force a restore */
9008 	crtc_state->mode_changed = true;
9009 
9010 	/* Attach plane to drm_atomic_state */
9011 	plane_state = drm_atomic_get_plane_state(state, plane);
9012 
9013 	ret = PTR_ERR_OR_ZERO(plane_state);
9014 	if (ret)
9015 		goto out;
9016 
9017 	/* Call commit internally with the state we just constructed */
9018 	ret = drm_atomic_commit(state);
9019 
9020 out:
9021 	drm_atomic_state_put(state);
9022 	if (ret)
9023 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9024 
9025 	return ret;
9026 }
9027 
9028 /*
9029  * This function handles all cases when a set mode does not come upon hotplug.
9030  * This includes when a display is unplugged then plugged back into the
9031  * same port and when running without usermode desktop manager support.
9032  */
9033 void dm_restore_drm_connector_state(struct drm_device *dev,
9034 				    struct drm_connector *connector)
9035 {
9036 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9037 	struct amdgpu_crtc *disconnected_acrtc;
9038 	struct dm_crtc_state *acrtc_state;
9039 
9040 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9041 		return;
9042 
9043 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9044 	if (!disconnected_acrtc)
9045 		return;
9046 
9047 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9048 	if (!acrtc_state->stream)
9049 		return;
9050 
9051 	/*
9052 	 * If the previous sink is not released and different from the current,
9053 	 * we deduce we are in a state where we cannot rely on a usermode call
9054 	 * to turn on the display, so we do it here.
9055 	 */
9056 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9057 		dm_force_atomic_commit(&aconnector->base);
9058 }
9059 
9060 /*
9061  * Grabs all modesetting locks to serialize against any blocking commits and
9062  * waits for completion of all non-blocking commits.
9063  */
9064 static int do_aquire_global_lock(struct drm_device *dev,
9065 				 struct drm_atomic_state *state)
9066 {
9067 	struct drm_crtc *crtc;
9068 	struct drm_crtc_commit *commit;
9069 	long ret;
9070 
9071 	/*
9072 	 * Adding all modeset locks to acquire_ctx will
9073 	 * ensure that when the framework releases it, the
9074 	 * extra locks we are locking here will get released too.
9075 	 */
9076 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9077 	if (ret)
9078 		return ret;
9079 
9080 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9081 		spin_lock(&crtc->commit_lock);
9082 		commit = list_first_entry_or_null(&crtc->commit_list,
9083 				struct drm_crtc_commit, commit_entry);
9084 		if (commit)
9085 			drm_crtc_commit_get(commit);
9086 		spin_unlock(&crtc->commit_lock);
9087 
9088 		if (!commit)
9089 			continue;
9090 
9091 		/*
9092 		 * Make sure all pending HW programming has completed and all
9093 		 * page flips are done.
9094 		 */
9095 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9096 
9097 		if (ret > 0)
9098 			ret = wait_for_completion_interruptible_timeout(
9099 					&commit->flip_done, 10*HZ);
9100 
9101 		if (ret == 0)
9102 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9103 				  crtc->base.id, crtc->name);
9104 
9105 		drm_crtc_commit_put(commit);
9106 	}
9107 
9108 	return ret < 0 ? ret : 0;
9109 }
9110 
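/*
 * Derive the FreeSync configuration for a CRTC from the connector's
 * reported refresh range and the requested mode: VRR is supported only
 * when the mode's nominal vrefresh falls within [min_vfreq, max_vfreq].
 */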
9111 static void get_freesync_config_for_crtc(
9112 	struct dm_crtc_state *new_crtc_state,
9113 	struct dm_connector_state *new_con_state)
9114 {
9115 	struct mod_freesync_config config = {0};
9116 	struct amdgpu_dm_connector *aconnector =
9117 			to_amdgpu_dm_connector(new_con_state->base.connector);
9118 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9119 	int vrefresh = drm_mode_vrefresh(mode);
9120 	bool fs_vid_mode = false;
9121 
9122 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9123 					vrefresh >= aconnector->min_vfreq &&
9124 					vrefresh <= aconnector->max_vfreq;
9125 
9126 	if (new_crtc_state->vrr_supported) {
9127 		new_crtc_state->stream->ignore_msa_timing_param = true;
9128 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9129 
9130 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9131 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9132 		config.vsif_supported = true;
9133 		config.btr = true;
9134 
9135 		if (fs_vid_mode) {
9136 			config.state = VRR_STATE_ACTIVE_FIXED;
9137 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9138 			goto out;
9139 		} else if (new_crtc_state->base.vrr_enabled) {
9140 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9141 		} else {
9142 			config.state = VRR_STATE_INACTIVE;
9143 		}
9144 	}
9145 out:
9146 	new_crtc_state->freesync_config = config;
9147 }
9148 
9149 static void reset_freesync_config_for_crtc(
9150 	struct dm_crtc_state *new_crtc_state)
9151 {
9152 	new_crtc_state->vrr_supported = false;
9153 
9154 	memset(&new_crtc_state->vrr_infopacket, 0,
9155 	       sizeof(new_crtc_state->vrr_infopacket));
9156 }
9157 
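/*
 * Two modes are considered timing-compatible for FreeSync when only the
 * vertical blanking differs (vtotal and vsync position change) while the
 * pixel clock, all horizontal timing and the vsync pulse width stay the
 * same, i.e. a pure front porch adjustment.
 */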
9158 static bool
9159 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9160 				 struct drm_crtc_state *new_crtc_state)
9161 {
9162 	struct drm_display_mode old_mode, new_mode;
9163 
9164 	if (!old_crtc_state || !new_crtc_state)
9165 		return false;
9166 
9167 	old_mode = old_crtc_state->mode;
9168 	new_mode = new_crtc_state->mode;
9169 
9170 	if (old_mode.clock       == new_mode.clock &&
9171 	    old_mode.hdisplay    == new_mode.hdisplay &&
9172 	    old_mode.vdisplay    == new_mode.vdisplay &&
9173 	    old_mode.htotal      == new_mode.htotal &&
9174 	    old_mode.vtotal      != new_mode.vtotal &&
9175 	    old_mode.hsync_start == new_mode.hsync_start &&
9176 	    old_mode.vsync_start != new_mode.vsync_start &&
9177 	    old_mode.hsync_end   == new_mode.hsync_end &&
9178 	    old_mode.vsync_end   != new_mode.vsync_end &&
9179 	    old_mode.hskew       == new_mode.hskew &&
9180 	    old_mode.vscan       == new_mode.vscan &&
9181 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9182 	    (new_mode.vsync_end - new_mode.vsync_start))
9183 		return true;
9184 
9185 	return false;
9186 }
9187 
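/*
 * The fixed refresh rate in uHz follows directly from the mode timing:
 *
 *   refresh_uhz = (clock_khz * 1000 * 1000000) / (htotal * vtotal)
 *
 * e.g. a 1080p CEA mode with clock = 148500 kHz, htotal = 2200 and
 * vtotal = 1125 yields 60000000 uHz, i.e. 60 Hz.
 */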
9188 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9189 	uint64_t num, den, res;
9190 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9191 
9192 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9193 
9194 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9195 	den = (unsigned long long)new_crtc_state->mode.htotal *
9196 	      (unsigned long long)new_crtc_state->mode.vtotal;
9197 
9198 	res = div_u64(num, den);
9199 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9200 }
9201 
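/*
 * Called from atomic check in two passes: first with enable == false to
 * remove the stream of every disabled/changed CRTC from the DC context,
 * then with enable == true to create and add the new streams. Sets
 * *lock_and_validation_needed whenever the DC context was modified.
 */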
9202 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9203 				struct drm_atomic_state *state,
9204 				struct drm_crtc *crtc,
9205 				struct drm_crtc_state *old_crtc_state,
9206 				struct drm_crtc_state *new_crtc_state,
9207 				bool enable,
9208 				bool *lock_and_validation_needed)
9209 {
9210 	struct dm_atomic_state *dm_state = NULL;
9211 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9212 	struct dc_stream_state *new_stream;
9213 	int ret = 0;
9214 
9215 	/*
9216 	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
9217 	 * dc_validation_set, and update changed items there.
9218 	 */
9219 	struct amdgpu_crtc *acrtc = NULL;
9220 	struct amdgpu_dm_connector *aconnector = NULL;
9221 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9222 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9223 
9224 	new_stream = NULL;
9225 
9226 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9227 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9228 	acrtc = to_amdgpu_crtc(crtc);
9229 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9230 
9231 	/* TODO This hack should go away */
9232 	if (aconnector && enable) {
9233 		/* Make sure fake sink is created in plug-in scenario */
9234 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9235 							    &aconnector->base);
9236 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9237 							    &aconnector->base);
9238 
9239 		if (IS_ERR(drm_new_conn_state)) {
9240 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9241 			goto fail;
9242 		}
9243 
9244 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9245 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9246 
9247 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9248 			goto skip_modeset;
9249 
9250 		new_stream = create_validate_stream_for_sink(aconnector,
9251 							     &new_crtc_state->mode,
9252 							     dm_new_conn_state,
9253 							     dm_old_crtc_state->stream);
9254 
9255 		/*
9256 		 * We can have no stream on ACTION_SET if a display
9257 		 * was disconnected during S3. In this case it is not an
9258 		 * error: the OS will be updated after detection and
9259 		 * will do the right thing on the next atomic commit.
9260 		 */
9261 
9262 		if (!new_stream) {
9263 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9264 					__func__, acrtc->base.base.id);
9265 			ret = -ENOMEM;
9266 			goto fail;
9267 		}
9268 
9269 		/*
9270 		 * TODO: Check VSDB bits to decide whether this should
9271 		 * be enabled or not.
9272 		 */
9273 		new_stream->triggered_crtc_reset.enabled =
9274 			dm->force_timing_sync;
9275 
9276 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9277 
9278 		ret = fill_hdr_info_packet(drm_new_conn_state,
9279 					   &new_stream->hdr_static_metadata);
9280 		if (ret)
9281 			goto fail;
9282 
9283 		/*
9284 		 * If we already removed the old stream from the context
9285 		 * (and set the new stream to NULL) then we can't reuse
9286 		 * the old stream even if the stream and scaling are unchanged.
9287 		 * We'll hit the BUG_ON and black screen.
9288 		 *
9289 		 * TODO: Refactor this function to allow this check to work
9290 		 * in all conditions.
9291 		 */
9292 		if (amdgpu_freesync_vid_mode &&
9293 		    dm_new_crtc_state->stream &&
9294 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9295 			goto skip_modeset;
9296 
9297 		if (dm_new_crtc_state->stream &&
9298 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9299 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9300 			new_crtc_state->mode_changed = false;
9301 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9302 					 new_crtc_state->mode_changed);
9303 		}
9304 	}
9305 
9306 	/* mode_changed flag may get updated above, need to check again */
9307 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9308 		goto skip_modeset;
9309 
9310 	DRM_DEBUG_ATOMIC(
9311 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9312 		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9313 		"connectors_changed:%d\n",
9314 		acrtc->crtc_id,
9315 		new_crtc_state->enable,
9316 		new_crtc_state->active,
9317 		new_crtc_state->planes_changed,
9318 		new_crtc_state->mode_changed,
9319 		new_crtc_state->active_changed,
9320 		new_crtc_state->connectors_changed);
9321 
9322 	/* Remove stream for any changed/disabled CRTC */
9323 	if (!enable) {
9325 		if (!dm_old_crtc_state->stream)
9326 			goto skip_modeset;
9327 
9328 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9329 		    is_timing_unchanged_for_freesync(new_crtc_state,
9330 						     old_crtc_state)) {
9331 			new_crtc_state->mode_changed = false;
9332 			DRM_DEBUG_DRIVER(
9333 				"Mode change not required for front porch change, "
9334 				"setting mode_changed to %d\n",
9335 				new_crtc_state->mode_changed);
9336 
9337 			set_freesync_fixed_config(dm_new_crtc_state);
9338 
9339 			goto skip_modeset;
9340 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9341 			   is_freesync_video_mode(&new_crtc_state->mode,
9342 						  aconnector)) {
9343 			set_freesync_fixed_config(dm_new_crtc_state);
9344 		}
9345 
9346 		ret = dm_atomic_get_state(state, &dm_state);
9347 		if (ret)
9348 			goto fail;
9349 
9350 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9351 				crtc->base.id);
9352 
9353 		/* i.e. reset mode */
9354 		if (dc_remove_stream_from_ctx(
9355 				dm->dc,
9356 				dm_state->context,
9357 				dm_old_crtc_state->stream) != DC_OK) {
9358 			ret = -EINVAL;
9359 			goto fail;
9360 		}
9361 
9362 		dc_stream_release(dm_old_crtc_state->stream);
9363 		dm_new_crtc_state->stream = NULL;
9364 
9365 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9366 
9367 		*lock_and_validation_needed = true;
9368 
9369 	} else {/* Add stream for any updated/enabled CRTC */
9370 		/*
9371 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
9372 		 * added MST connectors are not found in the existing crtc_state in chained mode.
9373 		 * TODO: need to dig out the root cause of that.
9374 		 */
9375 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9376 			goto skip_modeset;
9377 
9378 		if (modereset_required(new_crtc_state))
9379 			goto skip_modeset;
9380 
9381 		if (modeset_required(new_crtc_state, new_stream,
9382 				     dm_old_crtc_state->stream)) {
9383 
9384 			WARN_ON(dm_new_crtc_state->stream);
9385 
9386 			ret = dm_atomic_get_state(state, &dm_state);
9387 			if (ret)
9388 				goto fail;
9389 
9390 			dm_new_crtc_state->stream = new_stream;
9391 
9392 			dc_stream_retain(new_stream);
9393 
9394 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9395 					 crtc->base.id);
9396 
9397 			if (dc_add_stream_to_ctx(
9398 					dm->dc,
9399 					dm_state->context,
9400 					dm_new_crtc_state->stream) != DC_OK) {
9401 				ret = -EINVAL;
9402 				goto fail;
9403 			}
9404 
9405 			*lock_and_validation_needed = true;
9406 		}
9407 	}
9408 
9409 skip_modeset:
9410 	/* Release extra reference */
9411 	if (new_stream)
9412 		dc_stream_release(new_stream);
9413 
9414 	/*
9415 	 * We want to do dc stream updates that do not require a
9416 	 * full modeset below.
9417 	 */
9418 	if (!(enable && aconnector && new_crtc_state->active))
9419 		return 0;
9420 	/*
9421 	 * Given above conditions, the dc state cannot be NULL because:
9422 	 * 1. We're in the process of enabling CRTCs (just been added
9423 	 *    to the dc context, or already is on the context)
9424 	 * 2. Has a valid connector attached, and
9425 	 * 3. Is currently active and enabled.
9426 	 * => The dc stream state currently exists.
9427 	 */
9428 	BUG_ON(dm_new_crtc_state->stream == NULL);
9429 
9430 	/* Scaling or underscan settings */
9431 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9432 		update_stream_scaling_settings(
9433 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9434 
9435 	/* ABM settings */
9436 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9437 
9438 	/*
9439 	 * Color management settings. We also update color properties
9440 	 * when a modeset is needed, to ensure it gets reprogrammed.
9441 	 */
9442 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9443 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9444 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9445 		if (ret)
9446 			goto fail;
9447 	}
9448 
9449 	/* Update Freesync settings. */
9450 	get_freesync_config_for_crtc(dm_new_crtc_state,
9451 				     dm_new_conn_state);
9452 
9453 	return ret;
9454 
9455 fail:
9456 	if (new_stream)
9457 		dc_stream_release(new_stream);
9458 	return ret;
9459 }
9460 
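/*
 * Decide whether a plane update forces all planes on the stream to be
 * removed and recreated: this is the case on modesets, CRTC color
 * management changes, and any update on the same CRTC that can affect
 * z-order, scaling, blending, rotation or bandwidth.
 */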
9461 static bool should_reset_plane(struct drm_atomic_state *state,
9462 			       struct drm_plane *plane,
9463 			       struct drm_plane_state *old_plane_state,
9464 			       struct drm_plane_state *new_plane_state)
9465 {
9466 	struct drm_plane *other;
9467 	struct drm_plane_state *old_other_state, *new_other_state;
9468 	struct drm_crtc_state *new_crtc_state;
9469 	int i;
9470 
9471 	/*
9472 	 * TODO: Remove this hack once the checks below are sufficient
9473 	 * to determine when we need to reset all the planes on
9474 	 * the stream.
9475 	 */
9476 	if (state->allow_modeset)
9477 		return true;
9478 
9479 	/* Exit early if we know that we're adding or removing the plane. */
9480 	if (old_plane_state->crtc != new_plane_state->crtc)
9481 		return true;
9482 
9483 	/* old crtc == new_crtc == NULL, plane not in context. */
9484 	if (!new_plane_state->crtc)
9485 		return false;
9486 
9487 	new_crtc_state =
9488 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9489 
9490 	if (!new_crtc_state)
9491 		return true;
9492 
9493 	/* CRTC Degamma changes currently require us to recreate planes. */
9494 	if (new_crtc_state->color_mgmt_changed)
9495 		return true;
9496 
9497 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9498 		return true;
9499 
9500 	/*
9501 	 * If there are any new primary or overlay planes being added or
9502 	 * removed then the z-order can potentially change. To ensure
9503 	 * correct z-order and pipe acquisition the current DC architecture
9504 	 * requires us to remove and recreate all existing planes.
9505 	 *
9506 	 * TODO: Come up with a more elegant solution for this.
9507 	 */
9508 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9509 		struct amdgpu_framebuffer *old_afb, *new_afb;

9510 		if (other->type == DRM_PLANE_TYPE_CURSOR)
9511 			continue;
9512 
9513 		if (old_other_state->crtc != new_plane_state->crtc &&
9514 		    new_other_state->crtc != new_plane_state->crtc)
9515 			continue;
9516 
9517 		if (old_other_state->crtc != new_other_state->crtc)
9518 			return true;
9519 
9520 		/* Src/dst size and scaling updates. */
9521 		if (old_other_state->src_w != new_other_state->src_w ||
9522 		    old_other_state->src_h != new_other_state->src_h ||
9523 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9524 		    old_other_state->crtc_h != new_other_state->crtc_h)
9525 			return true;
9526 
9527 		/* Rotation / mirroring updates. */
9528 		if (old_other_state->rotation != new_other_state->rotation)
9529 			return true;
9530 
9531 		/* Blending updates. */
9532 		if (old_other_state->pixel_blend_mode !=
9533 		    new_other_state->pixel_blend_mode)
9534 			return true;
9535 
9536 		/* Alpha updates. */
9537 		if (old_other_state->alpha != new_other_state->alpha)
9538 			return true;
9539 
9540 		/* Colorspace changes. */
9541 		if (old_other_state->color_range != new_other_state->color_range ||
9542 		    old_other_state->color_encoding != new_other_state->color_encoding)
9543 			return true;
9544 
9545 		/* Framebuffer checks fall at the end. */
9546 		if (!old_other_state->fb || !new_other_state->fb)
9547 			continue;
9548 
9549 		/* Pixel format changes can require bandwidth updates. */
9550 		if (old_other_state->fb->format != new_other_state->fb->format)
9551 			return true;
9552 
9553 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9554 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9555 
9556 		/* Tiling and DCC changes also require bandwidth updates. */
9557 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9558 		    old_afb->base.modifier != new_afb->base.modifier)
9559 			return true;
9560 	}
9561 
9562 	return false;
9563 }
9564 
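/*
 * Validate a framebuffer attached to the cursor plane: it must fit the
 * hardware cursor size, be unscaled (src size == fb size), use a pitch of
 * 64, 128 or 256 pixels that equals its width, and be linearly tiled.
 */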
9565 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9566 			      struct drm_plane_state *new_plane_state,
9567 			      struct drm_framebuffer *fb)
9568 {
9569 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9570 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9571 	unsigned int pitch;
9572 	bool linear;
9573 
9574 	if (fb->width > new_acrtc->max_cursor_width ||
9575 	    fb->height > new_acrtc->max_cursor_height) {
9576 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9577 				 new_plane_state->fb->width,
9578 				 new_plane_state->fb->height);
9579 		return -EINVAL;
9580 	}
9581 	if (new_plane_state->src_w != fb->width << 16 ||
9582 	    new_plane_state->src_h != fb->height << 16) {
9583 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9584 		return -EINVAL;
9585 	}
9586 
9587 	/* Pitch in pixels */
9588 	pitch = fb->pitches[0] / fb->format->cpp[0];
9589 
9590 	if (fb->width != pitch) {
9591 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9592 				 fb->width, pitch);
9593 		return -EINVAL;
9594 	}
9595 
9596 	switch (pitch) {
9597 	case 64:
9598 	case 128:
9599 	case 256:
9600 		/* FB pitch is supported by cursor plane */
9601 		break;
9602 	default:
9603 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9604 		return -EINVAL;
9605 	}
9606 
9607 	/* Core DRM takes care of checking FB modifiers, so we only need to
9608 	 * check tiling flags when the FB doesn't have a modifier. */
9609 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9610 		if (adev->family < AMDGPU_FAMILY_AI) {
9611 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9612 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9613 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9614 		} else {
9615 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9616 		}
9617 		if (!linear) {
9618 			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9619 			return -EINVAL;
9620 		}
9621 	}
9622 
9623 	return 0;
9624 }
9625 
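/*
 * Counterpart of dm_update_crtc_state() for planes, also called in a
 * disable pass and an enable pass: removes changed planes from the DC
 * context first, then re-adds them with a freshly filled dc_plane_state,
 * flagging that full lock-and-validation is needed.
 */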
9626 static int dm_update_plane_state(struct dc *dc,
9627 				 struct drm_atomic_state *state,
9628 				 struct drm_plane *plane,
9629 				 struct drm_plane_state *old_plane_state,
9630 				 struct drm_plane_state *new_plane_state,
9631 				 bool enable,
9632 				 bool *lock_and_validation_needed)
9633 {
9635 	struct dm_atomic_state *dm_state = NULL;
9636 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9637 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9638 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9639 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9640 	struct amdgpu_crtc *new_acrtc;
9641 	bool needs_reset;
9642 	int ret = 0;
9643 
9645 	new_plane_crtc = new_plane_state->crtc;
9646 	old_plane_crtc = old_plane_state->crtc;
9647 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9648 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9649 
9650 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9651 		if (!enable || !new_plane_crtc ||
9652 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9653 			return 0;
9654 
9655 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9656 
9657 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9658 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9659 			return -EINVAL;
9660 		}
9661 
9662 		if (new_plane_state->fb) {
9663 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9664 						 new_plane_state->fb);
9665 			if (ret)
9666 				return ret;
9667 		}
9668 
9669 		return 0;
9670 	}
9671 
9672 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9673 					 new_plane_state);
9674 
9675 	/* Remove any changed/removed planes */
9676 	if (!enable) {
9677 		if (!needs_reset)
9678 			return 0;
9679 
9680 		if (!old_plane_crtc)
9681 			return 0;
9682 
9683 		old_crtc_state = drm_atomic_get_old_crtc_state(
9684 				state, old_plane_crtc);
9685 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9686 
9687 		if (!dm_old_crtc_state->stream)
9688 			return 0;
9689 
9690 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9691 				plane->base.id, old_plane_crtc->base.id);
9692 
9693 		ret = dm_atomic_get_state(state, &dm_state);
9694 		if (ret)
9695 			return ret;
9696 
9697 		if (!dc_remove_plane_from_context(
9698 				dc,
9699 				dm_old_crtc_state->stream,
9700 				dm_old_plane_state->dc_state,
9701 				dm_state->context)) {
9702 
9703 			return -EINVAL;
9704 		}
9705 
9707 		dc_plane_state_release(dm_old_plane_state->dc_state);
9708 		dm_new_plane_state->dc_state = NULL;
9709 
9710 		*lock_and_validation_needed = true;
9711 
9712 	} else { /* Add new planes */
9713 		struct dc_plane_state *dc_new_plane_state;
9714 
9715 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9716 			return 0;
9717 
9718 		if (!new_plane_crtc)
9719 			return 0;
9720 
9721 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9722 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9723 
9724 		if (!dm_new_crtc_state->stream)
9725 			return 0;
9726 
9727 		if (!needs_reset)
9728 			return 0;
9729 
9730 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9731 		if (ret)
9732 			return ret;
9733 
9734 		WARN_ON(dm_new_plane_state->dc_state);
9735 
9736 		dc_new_plane_state = dc_create_plane_state(dc);
9737 		if (!dc_new_plane_state)
9738 			return -ENOMEM;
9739 
9740 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9741 				 plane->base.id, new_plane_crtc->base.id);
9742 
9743 		ret = fill_dc_plane_attributes(
9744 			drm_to_adev(new_plane_crtc->dev),
9745 			dc_new_plane_state,
9746 			new_plane_state,
9747 			new_crtc_state);
9748 		if (ret) {
9749 			dc_plane_state_release(dc_new_plane_state);
9750 			return ret;
9751 		}
9752 
9753 		ret = dm_atomic_get_state(state, &dm_state);
9754 		if (ret) {
9755 			dc_plane_state_release(dc_new_plane_state);
9756 			return ret;
9757 		}
9758 
9759 		/*
9760 		 * Any atomic check errors that occur after this will
9761 		 * not need a release. The plane state will be attached
9762 		 * to the stream, and therefore part of the atomic
9763 		 * state. It'll be released when the atomic state is
9764 		 * cleaned.
9765 		 */
9766 		if (!dc_add_plane_to_context(
9767 				dc,
9768 				dm_new_crtc_state->stream,
9769 				dc_new_plane_state,
9770 				dm_state->context)) {
9771 
9772 			dc_plane_state_release(dc_new_plane_state);
9773 			return -EINVAL;
9774 		}
9775 
9776 		dm_new_plane_state->dc_state = dc_new_plane_state;
9777 
9778 		/* Tell DC to do a full surface update every time there
9779 		 * is a plane change. Inefficient, but works for now.
9780 		 */
9781 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9782 
9783 		*lock_and_validation_needed = true;
9784 	}
9785 
9786 
9787 	return ret;
9788 }
9789 
9790 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9791 				struct drm_crtc *crtc,
9792 				struct drm_crtc_state *new_crtc_state)
9793 {
9794 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9795 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9796 
9797 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9798 	 * cursor per pipe but it's going to inherit the scaling and
9799 	 * positioning from the underlying pipe. Check that the cursor plane's
9800 	 * scaling matches the primary plane's. */
9801 
9802 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9803 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9804 	if (!new_cursor_state || !new_primary_state ||
9805 	    !new_cursor_state->fb || !new_primary_state->fb) {
9806 		return 0;
9807 	}
9808 
9809 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9810 			 (new_cursor_state->src_w >> 16);
9811 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9812 			 (new_cursor_state->src_h >> 16);
9813 
9814 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9815 			 (new_primary_state->src_w >> 16);
9816 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9817 			 (new_primary_state->src_h >> 16);
9818 
9819 	if (cursor_scale_w != primary_scale_w ||
9820 	    cursor_scale_h != primary_scale_h) {
9821 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9822 		return -EINVAL;
9823 	}
9824 
9825 	return 0;
9826 }
9827 
9828 #if defined(CONFIG_DRM_AMD_DC_DCN)
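/*
 * When a CRTC drives an MST connector, pull every other CRTC affected by
 * DSC on the same MST topology into the atomic state so the DSC bandwidth
 * can be revalidated across the whole tree.
 */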
9829 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9830 {
9831 	struct drm_connector *connector;
9832 	struct drm_connector_state *conn_state;
9833 	struct amdgpu_dm_connector *aconnector = NULL;
9834 	int i;

9835 	for_each_new_connector_in_state(state, connector, conn_state, i) {
9836 		if (conn_state->crtc != crtc)
9837 			continue;
9838 
9839 		aconnector = to_amdgpu_dm_connector(connector);
9840 		if (!aconnector->port || !aconnector->mst_port)
9841 			aconnector = NULL;
9842 		else
9843 			break;
9844 	}
9845 
9846 	if (!aconnector)
9847 		return 0;
9848 
9849 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9850 }
9851 #endif
9852 
9853 /**
9854  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9855  * @dev: The DRM device
9856  * @state: The atomic state to commit
9857  *
9858  * Validate that the given atomic state is programmable by DC into hardware.
9859  * This involves constructing a &struct dc_state reflecting the new hardware
9860  * state we wish to commit, then querying DC to see if it is programmable. It's
9861  * important not to modify the existing DC state. Otherwise, atomic_check
9862  * may unexpectedly commit hardware changes.
9863  *
9864  * When validating the DC state, it's important that the right locks are
9865  * acquired. For full updates case which removes/adds/updates streams on one
9866  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9867  * that any such full update commit will wait for completion of any outstanding
9868  * flip using DRM's synchronization events.
9869  *
9870  * Note that DM adds the affected connectors for all CRTCs in state, even when that
9871  * might not seem necessary. This is because DC stream creation requires the
9872  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9873  * be possible but non-trivial - a possible TODO item.
9874  *
9875  * Return: 0 on success, negative error code if validation failed.
9876  */
9877 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9878 				  struct drm_atomic_state *state)
9879 {
9880 	struct amdgpu_device *adev = drm_to_adev(dev);
9881 	struct dm_atomic_state *dm_state = NULL;
9882 	struct dc *dc = adev->dm.dc;
9883 	struct drm_connector *connector;
9884 	struct drm_connector_state *old_con_state, *new_con_state;
9885 	struct drm_crtc *crtc;
9886 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9887 	struct drm_plane *plane;
9888 	struct drm_plane_state *old_plane_state, *new_plane_state;
9889 	enum dc_status status;
9890 	int ret, i;
9891 	bool lock_and_validation_needed = false;
9892 	struct dm_crtc_state *dm_old_crtc_state;
9893 
9894 	trace_amdgpu_dm_atomic_check_begin(state);
9895 
9896 	ret = drm_atomic_helper_check_modeset(dev, state);
9897 	if (ret)
9898 		goto fail;
9899 
9900 	/* Check connector changes */
9901 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9902 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9903 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9904 
9905 		/* Skip connectors that are disabled or part of modeset already. */
9906 		if (!old_con_state->crtc && !new_con_state->crtc)
9907 			continue;
9908 
9909 		if (!new_con_state->crtc)
9910 			continue;
9911 
9912 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9913 		if (IS_ERR(new_crtc_state)) {
9914 			ret = PTR_ERR(new_crtc_state);
9915 			goto fail;
9916 		}
9917 
9918 		if (dm_old_con_state->abm_level !=
9919 		    dm_new_con_state->abm_level)
9920 			new_crtc_state->connectors_changed = true;
9921 	}
9922 
9923 #if defined(CONFIG_DRM_AMD_DC_DCN)
9924 	if (dc_resource_is_dsc_encoding_supported(dc)) {
9925 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9926 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9927 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9928 				if (ret)
9929 					goto fail;
9930 			}
9931 		}
9932 	}
9933 #endif
9934 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9935 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9936 
9937 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9938 		    !new_crtc_state->color_mgmt_changed &&
9939 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9940 			dm_old_crtc_state->dsc_force_changed == false)
9941 			continue;
9942 
9943 		if (!new_crtc_state->enable)
9944 			continue;
9945 
9946 		ret = drm_atomic_add_affected_connectors(state, crtc);
9947 		if (ret)
9948 			goto fail;
9949 
9950 		ret = drm_atomic_add_affected_planes(state, crtc);
9951 		if (ret)
9952 			goto fail;
9953 
9954 		if (dm_old_crtc_state->dsc_force_changed)
9955 			new_crtc_state->mode_changed = true;
9956 	}
9957 
9958 	/*
9959 	 * Add all primary and overlay planes on the CRTC to the state
9960 	 * whenever a plane is enabled to maintain correct z-ordering
9961 	 * and to enable fast surface updates.
9962 	 */
9963 	drm_for_each_crtc(crtc, dev) {
9964 		bool modified = false;
9965 
9966 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9967 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9968 				continue;
9969 
9970 			if (new_plane_state->crtc == crtc ||
9971 			    old_plane_state->crtc == crtc) {
9972 				modified = true;
9973 				break;
9974 			}
9975 		}
9976 
9977 		if (!modified)
9978 			continue;
9979 
9980 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9981 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9982 				continue;
9983 
9984 			new_plane_state =
9985 				drm_atomic_get_plane_state(state, plane);
9986 
9987 			if (IS_ERR(new_plane_state)) {
9988 				ret = PTR_ERR(new_plane_state);
9989 				goto fail;
9990 			}
9991 		}
9992 	}
9993 
9994 	/* Remove existing planes if they are modified */
9995 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9996 		ret = dm_update_plane_state(dc, state, plane,
9997 					    old_plane_state,
9998 					    new_plane_state,
9999 					    false,
10000 					    &lock_and_validation_needed);
10001 		if (ret)
10002 			goto fail;
10003 	}
10004 
10005 	/* Disable all crtcs which require disable */
10006 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10007 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10008 					   old_crtc_state,
10009 					   new_crtc_state,
10010 					   false,
10011 					   &lock_and_validation_needed);
10012 		if (ret)
10013 			goto fail;
10014 	}
10015 
10016 	/* Enable all crtcs which require enable */
10017 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10018 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10019 					   old_crtc_state,
10020 					   new_crtc_state,
10021 					   true,
10022 					   &lock_and_validation_needed);
10023 		if (ret)
10024 			goto fail;
10025 	}
10026 
10027 	/* Add new/modified planes */
10028 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10029 		ret = dm_update_plane_state(dc, state, plane,
10030 					    old_plane_state,
10031 					    new_plane_state,
10032 					    true,
10033 					    &lock_and_validation_needed);
10034 		if (ret)
10035 			goto fail;
10036 	}
10037 
10038 	/* Run this here since we want to validate the streams we created */
10039 	ret = drm_atomic_helper_check_planes(dev, state);
10040 	if (ret)
10041 		goto fail;
10042 
	/* Check cursor-plane scaling */
10044 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10045 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10046 		if (ret)
10047 			goto fail;
10048 	}
10049 
10050 	if (state->legacy_cursor_update) {
10051 		/*
10052 		 * This is a fast cursor update coming from the plane update
10053 		 * helper, check if it can be done asynchronously for better
10054 		 * performance.
10055 		 */
10056 		state->async_update =
10057 			!drm_atomic_helper_async_check(dev, state);
10058 
10059 		/*
10060 		 * Skip the remaining global validation if this is an async
10061 		 * update. Cursor updates can be done without affecting
10062 		 * state or bandwidth calcs and this avoids the performance
10063 		 * penalty of locking the private state object and
10064 		 * allocating a new dc_state.
10065 		 */
10066 		if (state->async_update)
10067 			return 0;
10068 	}
10069 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-change validation was removed because a new stream
	 * cannot be committed into the context without causing a full reset.
	 * Decide how to handle this.
	 */
10075 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10076 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10077 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10078 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10079 
10080 		/* Skip any modesets/resets */
10081 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10082 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10083 			continue;
10084 
		/* Skip anything that is not a scaling or underscan change */
10086 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10087 			continue;
10088 
10089 		lock_and_validation_needed = true;
10090 	}
10091 
	/*
10093 	 * Streams and planes are reset when there are changes that affect
10094 	 * bandwidth. Anything that affects bandwidth needs to go through
10095 	 * DC global validation to ensure that the configuration can be applied
10096 	 * to hardware.
10097 	 *
10098 	 * We have to currently stall out here in atomic_check for outstanding
10099 	 * commits to finish in this case because our IRQ handlers reference
10100 	 * DRM state directly - we can end up disabling interrupts too early
10101 	 * if we don't.
10102 	 *
10103 	 * TODO: Remove this stall and drop DM state private objects.
10104 	 */
10105 	if (lock_and_validation_needed) {
10106 		ret = dm_atomic_get_state(state, &dm_state);
10107 		if (ret)
10108 			goto fail;
10109 
10110 		ret = do_aquire_global_lock(dev, state);
10111 		if (ret)
10112 			goto fail;
10113 
10114 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
10117 
10118 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10119 		if (ret)
10120 			goto fail;
10121 #endif
10122 
10123 		/*
10124 		 * Perform validation of MST topology in the state:
10125 		 * We need to perform MST atomic check before calling
10126 		 * dc_validate_global_state(), or there is a chance
10127 		 * to get stuck in an infinite loop and hang eventually.
10128 		 */
10129 		ret = drm_dp_mst_atomic_check(state);
10130 		if (ret)
10131 			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
10133 		if (status != DC_OK) {
10134 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10135 				       dc_status_to_str(status), status);
10136 			ret = -EINVAL;
10137 			goto fail;
10138 		}
10139 	} else {
10140 		/*
10141 		 * The commit is a fast update. Fast updates shouldn't change
10142 		 * the DC context, affect global validation, and can have their
10143 		 * commit work done in parallel with other commits not touching
10144 		 * the same resource. If we have a new DC context as part of
10145 		 * the DM atomic state from validation we need to free it and
10146 		 * retain the existing one instead.
10147 		 *
10148 		 * Furthermore, since the DM atomic state only contains the DC
10149 		 * context and can safely be annulled, we can free the state
10150 		 * and clear the associated private object now to free
10151 		 * some memory and avoid a possible use-after-free later.
10152 		 */
10153 
10154 		for (i = 0; i < state->num_private_objs; i++) {
10155 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10156 
10157 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
10159 
10160 				dm_atomic_destroy_state(obj,
10161 						state->private_objs[i].state);
10162 
10163 				/* If i is not at the end of the array then the
10164 				 * last element needs to be moved to where i was
10165 				 * before the array can safely be truncated.
10166 				 */
10167 				if (i != j)
10168 					state->private_objs[i] =
10169 						state->private_objs[j];
10170 
10171 				state->private_objs[j].ptr = NULL;
10172 				state->private_objs[j].state = NULL;
10173 				state->private_objs[j].old_state = NULL;
10174 				state->private_objs[j].new_state = NULL;
10175 
10176 				state->num_private_objs = j;
10177 				break;
10178 			}
10179 		}
10180 	}
10181 
10182 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10184 		struct dm_crtc_state *dm_new_crtc_state =
10185 			to_dm_crtc_state(new_crtc_state);
10186 
10187 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10188 							 UPDATE_TYPE_FULL :
10189 							 UPDATE_TYPE_FAST;
10190 	}
10191 
10192 	/* Must be success */
10193 	WARN_ON(ret);
10194 
10195 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10196 
10197 	return ret;
10198 
10199 fail:
10200 	if (ret == -EDEADLK)
10201 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10202 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10203 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10204 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10206 
10207 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10208 
10209 	return ret;
10210 }
10211 
10212 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10213 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10214 {
10215 	uint8_t dpcd_data;
10216 	bool capable = false;
10217 
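	/*
	 * A sink that sets DP_MSA_TIMING_PAR_IGNORED in its
	 * DP_DOWN_STREAM_PORT_COUNT register can ignore the MSA timing
	 * parameters, which is a prerequisite for variable refresh timings
	 * over DP; treat that bit as the capability indicator.
	 */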
10218 	if (amdgpu_dm_connector->dc_link &&
10219 		dm_helpers_dp_read_dpcd(
10220 				NULL,
10221 				amdgpu_dm_connector->dc_link,
10222 				DP_DOWN_STREAM_PORT_COUNT,
10223 				&dpcd_data,
10224 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10226 	}
10227 
10228 	return capable;
10229 }
10230 
10231 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10232 		uint8_t *edid_ext, int len,
10233 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10234 {
10235 	int i;
10236 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10237 	struct dc *dc = adev->dm.dc;
10238 
10239 	/* send extension block to DMCU for parsing */
10240 	for (i = 0; i < len; i += 8) {
10241 		bool res;
10242 		int offset;
10243 
		/* send 8 bytes at a time */
10245 		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10246 			return false;
10247 
		if (i + 8 == len) {
			/* EDID block send complete, expect the parsed result */
10250 			int version, min_rate, max_rate;
10251 
10252 			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10253 			if (res) {
10254 				/* amd vsdb found */
10255 				vsdb_info->freesync_supported = 1;
10256 				vsdb_info->amd_vsdb_version = version;
10257 				vsdb_info->min_refresh_rate_hz = min_rate;
10258 				vsdb_info->max_refresh_rate_hz = max_rate;
10259 				return true;
10260 			}
10261 			/* not amd vsdb */
10262 			return false;
10263 		}
10264 
		/* check for ack */
10266 		res = dc_edid_parser_recv_cea_ack(dc, &offset);
10267 		if (!res)
10268 			return false;
10269 	}
10270 
10271 	return false;
10272 }
10273 
10274 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10275 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10276 {
10277 	uint8_t *edid_ext = NULL;
10278 	int i;
10279 	bool valid_vsdb_found = false;
10280 
10281 	/*----- drm_find_cea_extension() -----*/
10282 	/* No EDID or EDID extensions */
10283 	if (edid == NULL || edid->extensions == 0)
10284 		return -ENODEV;
10285 
10286 	/* Find CEA extension */
10287 	for (i = 0; i < edid->extensions; i++) {
10288 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10289 		if (edid_ext[0] == CEA_EXT)
10290 			break;
10291 	}
10292 
10293 	if (i == edid->extensions)
10294 		return -ENODEV;
10295 
10296 	/*----- cea_db_offsets() -----*/
10297 	if (edid_ext[0] != CEA_EXT)
10298 		return -ENODEV;
10299 
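	/* Parse the 128-byte CEA extension block found above */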
10300 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10301 
10302 	return valid_vsdb_found ? i : -ENODEV;
10303 }
10304 
10305 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10306 					struct edid *edid)
10307 {
10308 	int i = 0;
10309 	struct detailed_timing *timing;
10310 	struct detailed_non_pixel *data;
10311 	struct detailed_data_monitor_range *range;
10312 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10313 			to_amdgpu_dm_connector(connector);
10314 	struct dm_connector_state *dm_con_state = NULL;
10315 
10316 	struct drm_device *dev = connector->dev;
10317 	struct amdgpu_device *adev = drm_to_adev(dev);
10318 	bool freesync_capable = false;
10319 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10320 
10321 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
10323 		goto update;
10324 	}
10325 
10326 	if (!edid) {
10327 		dm_con_state = to_dm_connector_state(connector->state);
10328 
10329 		amdgpu_dm_connector->min_vfreq = 0;
10330 		amdgpu_dm_connector->max_vfreq = 0;
10331 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10332 
10333 		goto update;
10334 	}
10335 
10336 	dm_con_state = to_dm_connector_state(connector->state);
10337 
10338 	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink is NULL, cannot update FreeSync capabilities.\n");
10340 		goto update;
10341 	}
10342 	if (!adev->dm.freesync_module)
10343 		goto update;
10344 
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10348 		bool edid_check_required = false;
10349 
		if (edid)
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);

		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
10358 			for (i = 0; i < 4; i++) {
10360 				timing	= &edid->detailed_timings[i];
10361 				data	= &timing->data.other_data;
10362 				range	= &data->data.range;
10363 				/*
10364 				 * Check if monitor has continuous frequency mode
10365 				 */
10366 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10367 					continue;
10368 				/*
10369 				 * Check for flag range limits only. If flag == 1 then
10370 				 * no additional timing information provided.
10371 				 * Default GTF, GTF Secondary curve and CVT are not
10372 				 * supported
10373 				 */
10374 				if (range->flags != 1)
10375 					continue;
10376 
10377 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10378 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10379 				amdgpu_dm_connector->pixel_clock_mhz =
10380 					range->pixel_clock_mhz * 10;
10381 
10382 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10383 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10384 
10385 				break;
10386 			}
10387 
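			/*
			 * A usable range of 10 Hz or less is treated as not
			 * FreeSync capable.
			 */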
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
10393 		}
10394 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10395 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
10399 
10400 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10401 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10402 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10403 				freesync_capable = true;
10404 
10405 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10406 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10407 		}
10408 	}
10409 
10410 update:
10411 	if (dm_con_state)
10412 		dm_con_state->freesync_capable = freesync_capable;
10413 
10414 	if (connector->vrr_capable_property)
10415 		drm_connector_set_vrr_capable_property(connector,
10416 						       freesync_capable);
10417 }
10418 
10419 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10420 {
10421 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10422 
10423 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10424 		return;
10425 	if (link->type == dc_connection_none)
10426 		return;
10427 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10428 					dpcd_data, sizeof(dpcd_data))) {
10429 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10430 
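		/*
		 * Byte 0 of DP_PSR_SUPPORT holds the sink's PSR version;
		 * zero means the sink does not support PSR.
		 */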
10431 		if (dpcd_data[0] == 0) {
10432 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10433 			link->psr_settings.psr_feature_enabled = false;
10434 		} else {
10435 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
10436 			link->psr_settings.psr_feature_enabled = true;
10437 		}
10438 
10439 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10440 	}
10441 }
10442 
/**
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success
 */
10449 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10450 {
10451 	struct dc_link *link = NULL;
10452 	struct psr_config psr_config = {0};
10453 	struct psr_context psr_context = {0};
10454 	bool ret = false;
10455 
10456 	if (stream == NULL)
10457 		return false;
10458 
10459 	link = stream->link;
10460 
10461 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10462 
10463 	if (psr_config.psr_version > 0) {
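		/*
		 * Conservative defaults: require link training on PSR exit,
		 * no frame-capture indication and no SMU optimizations. The
		 * RFB setup time and SDP deadline values appear to be
		 * firmware-tuned constants.
		 */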
10464 		psr_config.psr_exit_link_training_required = 0x1;
10465 		psr_config.psr_frame_capture_indication_req = 0;
10466 		psr_config.psr_rfb_setup_time = 0x37;
10467 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10468 		psr_config.allow_smu_optimizations = 0x0;
10469 
10470 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10472 	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10474 
10475 	return ret;
10476 }
10477 
/**
 * amdgpu_dm_psr_enable() - enable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
10484 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10485 {
10486 	struct dc_link *link = stream->link;
10487 	unsigned int vsync_rate_hz = 0;
10488 	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR. Initialize with a fail-safe default of
	 * 2 static frames.
	 */
	unsigned int num_frames_static = 2;
10494 
10495 	DRM_DEBUG_DRIVER("Enabling psr...\n");
10496 
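	/*
	 * Effective refresh rate in Hz: the pixel clock (stored in units of
	 * 100 Hz, hence the * 100) divided by the total pixels per frame
	 * (v_total * h_total).
	 */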
10497 	vsync_rate_hz = div64_u64(div64_u64((
10498 			stream->timing.pix_clk_100hz * 100),
10499 			stream->timing.v_total),
10500 			stream->timing.h_total);
10501 
	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
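	/*
	 * Worked example: at 60 Hz, frame_time_microsec = 16666, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 frames.
	 */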
10510 
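	/*
	 * Any cursor, overlay or surface update is treated as screen
	 * activity and restarts the static-frame count that gates PSR entry.
	 */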
10511 	params.triggers.cursor_update = true;
10512 	params.triggers.overlay_update = true;
10513 	params.triggers.surface_update = true;
10514 	params.num_frames = num_frames_static;
10515 
10516 	dc_stream_set_static_screen_params(link->ctx->dc,
10517 					   &stream, 1,
10518 					   &params);
10519 
10520 	return dc_link_set_psr_allow_active(link, true, false, false);
10521 }
10522 
/**
 * amdgpu_dm_psr_disable() - disable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
10529 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10530 {
10532 	DRM_DEBUG_DRIVER("Disabling psr...\n");
10533 
10534 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
10535 }
10536 
/**
 * amdgpu_dm_psr_disable_all() - disable the PSR firmware on all streams
 * @dm: display manager state
 *
 * Disables PSR if it is currently enabled on any stream.
 *
 * Return: true on success
 */
10543 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10544 {
10545 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10546 	return dc_set_psr_allow_active(dm->dc, false);
10547 }
10548 
10549 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10550 {
10551 	struct amdgpu_device *adev = drm_to_adev(dev);
10552 	struct dc *dc = adev->dm.dc;
10553 	int i;
10554 
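	/*
	 * Propagate the force_timing_sync setting to every active stream
	 * and retrigger CRTC synchronization under the DC lock.
	 */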
10555 	mutex_lock(&adev->dm.dc_lock);
10556 	if (dc->current_state) {
10557 		for (i = 0; i < dc->current_state->stream_count; ++i)
10558 			dc->current_state->streams[i]
10559 				->triggered_crtc_reset.enabled =
10560 				adev->dm.force_timing_sync;
10561 
10562 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10563 		dc_trigger_sync(dc, dc->current_state);
10564 	}
10565 	mutex_unlock(&adev->dm.dc_lock);
10566 }
10567 
10568 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10569 		       uint32_t value, const char *func_name)
10570 {
10571 #ifdef DM_CHECK_ADDR_0
10572 	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
10574 		return;
10575 	}
10576 #endif
10577 	cgs_write_register(ctx->cgs_device, address, value);
10578 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10579 }
10580 
10581 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10582 			  const char *func_name)
10583 {
10584 	uint32_t value;
10585 #ifdef DM_CHECK_ADDR_0
10586 	if (address == 0) {
		DC_ERR("invalid register read. address = 0\n");
10588 		return 0;
10589 	}
10590 #endif
10591 
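	/*
	 * While the DMUB register helper is gathering a burst of offloaded
	 * writes, a read cannot be serviced coherently; assert and return 0
	 * rather than returning stale data.
	 */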
10592 	if (ctx->dmub_srv &&
10593 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10594 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10595 		ASSERT(false);
10596 		return 0;
10597 	}
10598 
10599 	value = cgs_read_register(ctx->cgs_device, address);
10600 
10601 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10602 
10603 	return value;
10604 }
10605