xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 49c70ece54b0d1c51bc31b2b0c1070777c992c26)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "amdgpu_dm_trace.h"
39 
40 #include "vid.h"
41 #include "amdgpu.h"
42 #include "amdgpu_display.h"
43 #include "amdgpu_ucode.h"
44 #include "atom.h"
45 #include "amdgpu_dm.h"
46 #ifdef CONFIG_DRM_AMD_DC_HDCP
47 #include "amdgpu_dm_hdcp.h"
48 #include <drm/drm_hdcp.h>
49 #endif
50 #include "amdgpu_pm.h"
51 
52 #include "amd_shared.h"
53 #include "amdgpu_dm_irq.h"
54 #include "dm_helpers.h"
55 #include "amdgpu_dm_mst_types.h"
56 #if defined(CONFIG_DEBUG_FS)
57 #include "amdgpu_dm_debugfs.h"
58 #endif
59 
60 #include "ivsrcid/ivsrcid_vislands30.h"
61 
62 #include <linux/module.h>
63 #include <linux/moduleparam.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
69 
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107 
108 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110 
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113 
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116 
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
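
/*
 * The PSP header and footer bracket the inst_const payload of the DMCUB
 * ucode image; dm_dmub_hw_init() below strips them when copying the
 * firmware into its instruction window:
 *
 *   [ PSP header | inst_const payload | PSP footer | bss/data ]
 *   |<------------- inst_const_bytes ------------->|
 */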
119 
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
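 *
 * For example, an atomic commit coming in from userspace is translated by
 * amdgpu_dm_atomic_commit_tail() into DC stream and plane updates and then
 * handed to the DC core for programming.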
128  */
129 
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
135 {
136 	switch (link->dpcd_caps.dongle_type) {
137 	case DISPLAY_DONGLE_NONE:
138 		return DRM_MODE_SUBCONNECTOR_Native;
139 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140 		return DRM_MODE_SUBCONNECTOR_VGA;
141 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
143 		return DRM_MODE_SUBCONNECTOR_DVID;
144 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146 		return DRM_MODE_SUBCONNECTOR_HDMIA;
147 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
148 	default:
149 		return DRM_MODE_SUBCONNECTOR_Unknown;
150 	}
151 }
152 
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
154 {
155 	struct dc_link *link = aconnector->dc_link;
156 	struct drm_connector *connector = &aconnector->base;
157 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
158 
159 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
160 		return;
161 
162 	if (aconnector->dc_sink)
163 		subconnector = get_subconnector_type(link);
164 
165 	drm_object_property_set_value(&connector->base,
166 			connector->dev->mode_config.dp_subconnector_property,
167 			subconnector);
168 }
169 
170 /*
171  * Initializes drm_device display related structures, based on the information
172  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
173  * drm_encoder, drm_mode_config
174  *
175  * Returns 0 on success
176  */
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
180 
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182 				struct drm_plane *plane,
183 				unsigned long possible_crtcs,
184 				const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186 			       struct drm_plane *plane,
187 			       uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
190 				    uint32_t link_index,
191 				    struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193 				  struct amdgpu_encoder *aencoder,
194 				  uint32_t link_index);
195 
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
197 
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
199 
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 				  struct drm_atomic_state *state);
202 
203 static void handle_cursor_update(struct drm_plane *plane,
204 				 struct drm_plane_state *old_plane_state);
205 
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
211 
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
214 
215 static bool
216 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
217 				 struct drm_crtc_state *new_crtc_state);
218 /*
219  * dm_vblank_get_counter
220  *
221  * @brief
222  * Get counter for number of vertical blanks
223  *
224  * @param
225  * struct amdgpu_device *adev - [in] desired amdgpu device
226  * int crtc - [in] which CRTC to get the counter from
227  *
228  * @return
229  * Counter for vertical blanks
230  */
231 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
232 {
233 	if (crtc >= adev->mode_info.num_crtc)
234 		return 0;
235 	else {
236 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
237 
238 		if (acrtc->dm_irq_params.stream == NULL) {
239 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
240 				  crtc);
241 			return 0;
242 		}
243 
244 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
245 	}
246 }
247 
248 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
249 				  u32 *vbl, u32 *position)
250 {
251 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
252 
253 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
254 		return -EINVAL;
255 	else {
256 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
257 
258 		if (acrtc->dm_irq_params.stream == NULL) {
259 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
260 				  crtc);
261 			return 0;
262 		}
263 
264 		/*
265 		 * TODO rework base driver to use values directly.
266 		 * for now parse it back into reg-format
267 		 */
268 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
269 					 &v_blank_start,
270 					 &v_blank_end,
271 					 &h_position,
272 					 &v_position);
273 
274 		*position = v_position | (h_position << 16);
275 		*vbl = v_blank_start | (v_blank_end << 16);
276 	}
277 
278 	return 0;
279 }
280 
281 static bool dm_is_idle(void *handle)
282 {
283 	/* XXX todo */
284 	return true;
285 }
286 
287 static int dm_wait_for_idle(void *handle)
288 {
289 	/* XXX todo */
290 	return 0;
291 }
292 
293 static bool dm_check_soft_reset(void *handle)
294 {
295 	return false;
296 }
297 
298 static int dm_soft_reset(void *handle)
299 {
300 	/* XXX todo */
301 	return 0;
302 }
303 
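/*
 * Map an OTG (output timing generator) instance back to its amdgpu_crtc.
 * Falls back to CRTC 0, with a warning, if the instance is invalid.
 */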
304 static struct amdgpu_crtc *
305 get_crtc_by_otg_inst(struct amdgpu_device *adev,
306 		     int otg_inst)
307 {
308 	struct drm_device *dev = adev_to_drm(adev);
309 	struct drm_crtc *crtc;
310 	struct amdgpu_crtc *amdgpu_crtc;
311 
312 	if (otg_inst == -1) {
313 		WARN_ON(1);
314 		return adev->mode_info.crtcs[0];
315 	}
316 
317 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
318 		amdgpu_crtc = to_amdgpu_crtc(crtc);
319 
320 		if (amdgpu_crtc->otg_inst == otg_inst)
321 			return amdgpu_crtc;
322 	}
323 
324 	return NULL;
325 }
326 
327 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
328 {
329 	return acrtc->dm_irq_params.freesync_config.state ==
330 		       VRR_STATE_ACTIVE_VARIABLE ||
331 	       acrtc->dm_irq_params.freesync_config.state ==
332 		       VRR_STATE_ACTIVE_FIXED;
333 }
334 
335 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
336 {
337 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
338 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
339 }
340 
341 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
342 					      struct dm_crtc_state *new_state)
343 {
344 	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
345 		return true;
346 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
347 		return true;
348 	else
349 		return false;
350 }
351 
352 /**
353  * dm_pflip_high_irq() - Handle pageflip interrupt
354  * @interrupt_params: interrupt parameters, carrying the amdgpu device and IRQ source
355  *
356  * Handles the pageflip interrupt by notifying all interested parties
357  * that the pageflip has been completed.
358  */
359 static void dm_pflip_high_irq(void *interrupt_params)
360 {
361 	struct amdgpu_crtc *amdgpu_crtc;
362 	struct common_irq_params *irq_params = interrupt_params;
363 	struct amdgpu_device *adev = irq_params->adev;
364 	unsigned long flags;
365 	struct drm_pending_vblank_event *e;
366 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
367 	bool vrr_active;
368 
369 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
370 
371 	/* IRQ could occur when in initial stage */
372 	/* TODO work and BO cleanup */
373 	if (amdgpu_crtc == NULL) {
374 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
375 		return;
376 	}
377 
378 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
379 
380 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
381 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
382 						 amdgpu_crtc->pflip_status,
383 						 AMDGPU_FLIP_SUBMITTED,
384 						 amdgpu_crtc->crtc_id,
385 						 amdgpu_crtc);
386 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
387 		return;
388 	}
389 
390 	/* page flip completed. */
391 	e = amdgpu_crtc->event;
392 	amdgpu_crtc->event = NULL;
393 
394 	if (!e)
395 		WARN_ON(1);
396 
397 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
398 
399 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
400 	if (!vrr_active ||
401 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
402 				      &v_blank_end, &hpos, &vpos) ||
403 	    (vpos < v_blank_start)) {
404 		/* Update to correct count and vblank timestamp if racing with
405 		 * vblank irq. This also updates to the correct vblank timestamp
406 		 * even in VRR mode, as scanout is past the front-porch atm.
407 		 */
408 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
409 
410 		/* Wake up userspace by sending the pageflip event with proper
411 		 * count and timestamp of vblank of flip completion.
412 		 */
413 		if (e) {
414 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
415 
416 			/* Event sent, so done with vblank for this flip */
417 			drm_crtc_vblank_put(&amdgpu_crtc->base);
418 		}
419 	} else if (e) {
420 		/* VRR active and inside front-porch: vblank count and
421 		 * timestamp for pageflip event will only be up to date after
422 		 * drm_crtc_handle_vblank() has been executed from late vblank
423 		 * irq handler after start of back-porch (vline 0). We queue the
424 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
425 		 * updated timestamp and count, once it runs after us.
426 		 *
427 		 * We need to open-code this instead of using the helper
428 		 * drm_crtc_arm_vblank_event(), as that helper would
429 		 * call drm_crtc_accurate_vblank_count(), which we must
430 		 * not call in VRR mode while we are in front-porch!
431 		 */
432 
433 		/* sequence will be replaced by real count during send-out. */
434 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
435 		e->pipe = amdgpu_crtc->crtc_id;
436 
437 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
438 		e = NULL;
439 	}
440 
441 	/* Keep track of vblank of this flip for flip throttling. We use the
442 	 * cooked hw counter, as that one is incremented at the start of the
443 	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
444 	 * count for queueing new pageflips if vsync + VRR is enabled.
445 	 */
446 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
447 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
448 
449 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
450 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
451 
452 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
453 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
454 			 vrr_active, (int) !e);
455 }
456 
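/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: interrupt parameters
 *
 * In VRR mode, core vblank handling is done from here, since the VUPDATE
 * interrupt fires after the end of front-porch, where vblank timestamps
 * are valid. Also performs below-the-range (BTR) processing for pre-DCE12
 * ASICs.
 */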
457 static void dm_vupdate_high_irq(void *interrupt_params)
458 {
459 	struct common_irq_params *irq_params = interrupt_params;
460 	struct amdgpu_device *adev = irq_params->adev;
461 	struct amdgpu_crtc *acrtc;
462 	unsigned long flags;
463 	int vrr_active;
464 
465 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
466 
467 	if (acrtc) {
468 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
469 
470 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
471 			      acrtc->crtc_id,
472 			      vrr_active);
473 
474 		/* Core vblank handling is done here after the end of front-porch
475 		 * in VRR mode, as vblank timestamping only gives valid results
476 		 * once scanout has passed the front-porch. This will also deliver
477 		 * any page-flip completion events that have been queued to us
478 		 * if a pageflip happened inside front-porch.
479 		 */
480 		if (vrr_active) {
481 			drm_crtc_handle_vblank(&acrtc->base);
482 
483 			/* BTR processing for pre-DCE12 ASICs */
484 			if (acrtc->dm_irq_params.stream &&
485 			    adev->family < AMDGPU_FAMILY_AI) {
486 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
487 				mod_freesync_handle_v_update(
488 				    adev->dm.freesync_module,
489 				    acrtc->dm_irq_params.stream,
490 				    &acrtc->dm_irq_params.vrr_params);
491 
492 				dc_stream_adjust_vmin_vmax(
493 				    adev->dm.dc,
494 				    acrtc->dm_irq_params.stream,
495 				    &acrtc->dm_irq_params.vrr_params.adjust);
496 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
497 			}
498 		}
499 	}
500 }
501 
502 /**
503  * dm_crtc_high_irq() - Handles CRTC interrupt
504  * @interrupt_params: used for determining the CRTC instance
505  *
506  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
507  * event handler.
508  */
509 static void dm_crtc_high_irq(void *interrupt_params)
510 {
511 	struct common_irq_params *irq_params = interrupt_params;
512 	struct amdgpu_device *adev = irq_params->adev;
513 	struct amdgpu_crtc *acrtc;
514 	unsigned long flags;
515 	int vrr_active;
516 
517 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
518 	if (!acrtc)
519 		return;
520 
521 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
522 
523 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
524 		      vrr_active, acrtc->dm_irq_params.active_planes);
525 
526 	/*
527 	 * Core vblank handling at the start of front-porch is only possible
528 	 * in non-VRR mode, as only then does vblank timestamping give
529 	 * valid results while inside the front-porch. Otherwise defer it
530 	 * to dm_vupdate_high_irq after the end of front-porch.
531 	 */
532 	if (!vrr_active)
533 		drm_crtc_handle_vblank(&acrtc->base);
534 
535 	/*
536 	 * The following must happen at the start of vblank, for CRC
537 	 * computation and below-the-range (BTR) support in VRR mode.
538 	 */
539 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
540 
541 	/* BTR updates need to happen before VUPDATE on Vega and above. */
542 	if (adev->family < AMDGPU_FAMILY_AI)
543 		return;
544 
545 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
546 
547 	if (acrtc->dm_irq_params.stream &&
548 	    acrtc->dm_irq_params.vrr_params.supported &&
549 	    acrtc->dm_irq_params.freesync_config.state ==
550 		    VRR_STATE_ACTIVE_VARIABLE) {
551 		mod_freesync_handle_v_update(adev->dm.freesync_module,
552 					     acrtc->dm_irq_params.stream,
553 					     &acrtc->dm_irq_params.vrr_params);
554 
555 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
556 					   &acrtc->dm_irq_params.vrr_params.adjust);
557 	}
558 
559 	/*
560 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
561 	 * In that case, pageflip completion interrupts won't fire and pageflip
562 	 * completion events won't get delivered. Prevent this by sending
563 	 * pending pageflip events from here if a flip is still pending.
564 	 *
565 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
566 	 * avoid race conditions between flip programming and completion,
567 	 * which could cause too early flip completion events.
568 	 */
569 	if (adev->family >= AMDGPU_FAMILY_RV &&
570 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
571 	    acrtc->dm_irq_params.active_planes == 0) {
572 		if (acrtc->event) {
573 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
574 			acrtc->event = NULL;
575 			drm_crtc_vblank_put(&acrtc->base);
576 		}
577 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
578 	}
579 
580 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
581 }
582 
583 #if defined(CONFIG_DRM_AMD_DC_DCN)
584 /**
585  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
586  * DCN generation ASICs
587  * @interrupt_params: interrupt parameters
588  *
589  * Used to set the CRC window and read out the CRC value at the vertical line 0 position
590  */
591 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
592 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
593 {
594 	struct common_irq_params *irq_params = interrupt_params;
595 	struct amdgpu_device *adev = irq_params->adev;
596 	struct amdgpu_crtc *acrtc;
597 
598 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
599 
600 	if (!acrtc)
601 		return;
602 
603 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
604 }
605 #endif
606 #endif
607 
608 static int dm_set_clockgating_state(void *handle,
609 		  enum amd_clockgating_state state)
610 {
611 	return 0;
612 }
613 
614 static int dm_set_powergating_state(void *handle,
615 		  enum amd_powergating_state state)
616 {
617 	return 0;
618 }
619 
620 /* Prototypes of private functions */
621 static int dm_early_init(void *handle);
622 
623 /* Allocate memory for FBC compressed data */
624 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
625 {
626 	struct drm_device *dev = connector->dev;
627 	struct amdgpu_device *adev = drm_to_adev(dev);
628 	struct dm_compressor_info *compressor = &adev->dm.compressor;
629 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
630 	struct drm_display_mode *mode;
631 	unsigned long max_size = 0;
632 
633 	if (adev->dm.dc->fbc_compressor == NULL)
634 		return;
635 
636 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
637 		return;
638 
639 	if (compressor->bo_ptr)
640 		return;
641 
642 
643 	list_for_each_entry(mode, &connector->modes, head) {
644 		if (max_size < mode->htotal * mode->vtotal)
645 			max_size = mode->htotal * mode->vtotal;
646 	}
647 
648 	if (max_size) {
649 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
650 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
651 			    &compressor->gpu_addr, &compressor->cpu_addr);
652 
653 		if (r) {
654 			DRM_ERROR("DM: Failed to initialize FBC\n");
655 		} else {
656 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
657 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
658 		}
659 
660 	}
661 
662 }
663 
664 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
665 					  int pipe, bool *enabled,
666 					  unsigned char *buf, int max_bytes)
667 {
668 	struct drm_device *dev = dev_get_drvdata(kdev);
669 	struct amdgpu_device *adev = drm_to_adev(dev);
670 	struct drm_connector *connector;
671 	struct drm_connector_list_iter conn_iter;
672 	struct amdgpu_dm_connector *aconnector;
673 	int ret = 0;
674 
675 	*enabled = false;
676 
677 	mutex_lock(&adev->dm.audio_lock);
678 
679 	drm_connector_list_iter_begin(dev, &conn_iter);
680 	drm_for_each_connector_iter(connector, &conn_iter) {
681 		aconnector = to_amdgpu_dm_connector(connector);
682 		if (aconnector->audio_inst != port)
683 			continue;
684 
685 		*enabled = true;
686 		ret = drm_eld_size(connector->eld);
687 		memcpy(buf, connector->eld, min(max_bytes, ret));
688 
689 		break;
690 	}
691 	drm_connector_list_iter_end(&conn_iter);
692 
693 	mutex_unlock(&adev->dm.audio_lock);
694 
695 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
696 
697 	return ret;
698 }
699 
700 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
701 	.get_eld = amdgpu_dm_audio_component_get_eld,
702 };
703 
704 static int amdgpu_dm_audio_component_bind(struct device *kdev,
705 				       struct device *hda_kdev, void *data)
706 {
707 	struct drm_device *dev = dev_get_drvdata(kdev);
708 	struct amdgpu_device *adev = drm_to_adev(dev);
709 	struct drm_audio_component *acomp = data;
710 
711 	acomp->ops = &amdgpu_dm_audio_component_ops;
712 	acomp->dev = kdev;
713 	adev->dm.audio_component = acomp;
714 
715 	return 0;
716 }
717 
718 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
719 					  struct device *hda_kdev, void *data)
720 {
721 	struct drm_device *dev = dev_get_drvdata(kdev);
722 	struct amdgpu_device *adev = drm_to_adev(dev);
723 	struct drm_audio_component *acomp = data;
724 
725 	acomp->ops = NULL;
726 	acomp->dev = NULL;
727 	adev->dm.audio_component = NULL;
728 }
729 
730 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
731 	.bind	= amdgpu_dm_audio_component_bind,
732 	.unbind	= amdgpu_dm_audio_component_unbind,
733 };
734 
735 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
736 {
737 	int i, ret;
738 
739 	if (!amdgpu_audio)
740 		return 0;
741 
742 	adev->mode_info.audio.enabled = true;
743 
744 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
745 
746 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
747 		adev->mode_info.audio.pin[i].channels = -1;
748 		adev->mode_info.audio.pin[i].rate = -1;
749 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
750 		adev->mode_info.audio.pin[i].status_bits = 0;
751 		adev->mode_info.audio.pin[i].category_code = 0;
752 		adev->mode_info.audio.pin[i].connected = false;
753 		adev->mode_info.audio.pin[i].id =
754 			adev->dm.dc->res_pool->audios[i]->inst;
755 		adev->mode_info.audio.pin[i].offset = 0;
756 	}
757 
758 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
759 	if (ret < 0)
760 		return ret;
761 
762 	adev->dm.audio_registered = true;
763 
764 	return 0;
765 }
766 
767 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
768 {
769 	if (!amdgpu_audio)
770 		return;
771 
772 	if (!adev->mode_info.audio.enabled)
773 		return;
774 
775 	if (adev->dm.audio_registered) {
776 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
777 		adev->dm.audio_registered = false;
778 	}
779 
780 	/* TODO: Disable audio? */
781 
782 	adev->mode_info.audio.enabled = false;
783 }
784 
785 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
786 {
787 	struct drm_audio_component *acomp = adev->dm.audio_component;
788 
789 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
790 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
791 
792 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
793 						 pin, -1);
794 	}
795 }
796 
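/**
 * dm_dmub_hw_init() - Initialize DMUB hardware
 * @adev: amdgpu device
 *
 * Copies the firmware and VBIOS into the reserved framebuffer windows,
 * clears the mailbox, tracebuffer and firmware-state regions, then hands
 * the window layout to the DMUB service for hardware initialization.
 * Returns 0 immediately on ASICs without DMUB support.
 */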
797 static int dm_dmub_hw_init(struct amdgpu_device *adev)
798 {
799 	const struct dmcub_firmware_header_v1_0 *hdr;
800 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
801 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
802 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
803 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
804 	struct abm *abm = adev->dm.dc->res_pool->abm;
805 	struct dmub_srv_hw_params hw_params;
806 	enum dmub_status status;
807 	const unsigned char *fw_inst_const, *fw_bss_data;
808 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
809 	bool has_hw_support;
810 
811 	if (!dmub_srv)
812 		/* DMUB isn't supported on the ASIC. */
813 		return 0;
814 
815 	if (!fb_info) {
816 		DRM_ERROR("No framebuffer info for DMUB service.\n");
817 		return -EINVAL;
818 	}
819 
820 	if (!dmub_fw) {
821 		/* Firmware required for DMUB support. */
822 		DRM_ERROR("No firmware provided for DMUB.\n");
823 		return -EINVAL;
824 	}
825 
826 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
827 	if (status != DMUB_STATUS_OK) {
828 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
829 		return -EINVAL;
830 	}
831 
832 	if (!has_hw_support) {
833 		DRM_INFO("DMUB unsupported on ASIC\n");
834 		return 0;
835 	}
836 
837 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
838 
839 	fw_inst_const = dmub_fw->data +
840 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
841 			PSP_HEADER_BYTES;
842 
843 	fw_bss_data = dmub_fw->data +
844 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
845 		      le32_to_cpu(hdr->inst_const_bytes);
846 
847 	/* Copy firmware and bios info into FB memory. */
848 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
849 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
850 
851 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
852 
853 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
854 	 * amdgpu_ucode_init_single_fw will load dmub firmware
855 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
856 	 * will be done by dm_dmub_hw_init
857 	 */
858 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
859 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
860 				fw_inst_const_size);
861 	}
862 
863 	if (fw_bss_data_size)
864 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
865 		       fw_bss_data, fw_bss_data_size);
866 
867 	/* Copy firmware bios info into FB memory. */
868 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
869 	       adev->bios_size);
870 
871 	/* Reset regions that need to be reset. */
872 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
873 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
874 
875 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
876 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
877 
878 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
879 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
880 
881 	/* Initialize hardware. */
882 	memset(&hw_params, 0, sizeof(hw_params));
883 	hw_params.fb_base = adev->gmc.fb_start;
884 	hw_params.fb_offset = adev->gmc.aper_base;
885 
886 	/* backdoor load firmware and trigger dmub running */
887 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
888 		hw_params.load_inst_const = true;
889 
890 	if (dmcu)
891 		hw_params.psp_version = dmcu->psp_version;
892 
893 	for (i = 0; i < fb_info->num_fb; ++i)
894 		hw_params.fb[i] = &fb_info->fb[i];
895 
896 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
897 	if (status != DMUB_STATUS_OK) {
898 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
899 		return -EINVAL;
900 	}
901 
902 	/* Wait for firmware load to finish. */
903 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
904 	if (status != DMUB_STATUS_OK)
905 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
906 
907 	/* Init DMCU and ABM if available. */
908 	if (dmcu && abm) {
909 		dmcu->funcs->dmcu_init(dmcu);
910 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
911 	}
912 
913 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
914 	if (!adev->dm.dc->ctx->dmub_srv) {
915 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
916 		return -ENOMEM;
917 	}
918 
919 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
920 		 adev->dm.dmcub_fw_version);
921 
922 	return 0;
923 }
924 
925 #define DMUB_TRACE_MAX_READ 64
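/*
 * Drain up to DMUB_TRACE_MAX_READ entries from the DMUB outbox0 trace
 * buffer per interrupt and forward each one to the
 * amdgpu_dmub_trace_high_irq tracepoint.
 */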
926 static void dm_dmub_trace_high_irq(void *interrupt_params)
927 {
928 	struct common_irq_params *irq_params = interrupt_params;
929 	struct amdgpu_device *adev = irq_params->adev;
930 	struct amdgpu_display_manager *dm = &adev->dm;
931 	struct dmcub_trace_buf_entry entry = { 0 };
932 	uint32_t count = 0;
933 
934 	do {
935 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
936 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
937 							entry.param0, entry.param1);
938 
939 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
940 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
941 		} else {
942 			break;
943 		}
944 		count++;
945 
946 	} while (count < DMUB_TRACE_MAX_READ);
947 
948 	ASSERT(count <= DMUB_TRACE_MAX_READ);
949 }
950 
951 #if defined(CONFIG_DRM_AMD_DC_DCN)
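/*
 * Build the DC physical address space configuration (system aperture,
 * framebuffer ranges and GART page table location) from the GMC state,
 * so DC can program the MMHUB system context.
 */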
952 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
953 {
954 	uint64_t pt_base;
955 	uint32_t logical_addr_low;
956 	uint32_t logical_addr_high;
957 	uint32_t agp_base, agp_bot, agp_top;
958 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
959 
960 	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
961 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
962 
963 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
964 		/*
965 		 * Raven2 has a HW issue that prevents it from using any vram
966 		 * above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
967 		 * increase the system aperture high address (add 1)
968 		 * to get rid of the VM fault and hardware hang.
969 		 */
970 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
971 	else
972 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
973 
974 	agp_base = 0;
975 	agp_bot = adev->gmc.agp_start >> 24;
976 	agp_top = adev->gmc.agp_end >> 24;
977 
978 
979 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
980 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
981 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
982 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
983 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
984 	page_table_base.low_part = lower_32_bits(pt_base);
985 
986 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
987 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
988 
989 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
990 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
991 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
992 
993 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
994 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
995 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
996 
997 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
998 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
999 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1000 
1001 	pa_config->is_hvm_enabled = 0;
1002 
1003 }
1004 #endif
1005 #if defined(CONFIG_DRM_AMD_DC_DCN)
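/*
 * Deferred work that tracks how many CRTCs have vblank IRQs enabled and
 * only allows DC idle optimizations (e.g. MALL stutter) once none do.
 */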
1006 static void event_mall_stutter(struct work_struct *work)
1007 {
1008 
1009 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1010 	struct amdgpu_display_manager *dm = vblank_work->dm;
1011 
1012 	mutex_lock(&dm->dc_lock);
1013 
1014 	if (vblank_work->enable)
1015 		dm->active_vblank_irq_count++;
1016 	else
1017 		dm->active_vblank_irq_count--;
1018 
1019 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1020 
1021 	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1022 
1023 
1024 	mutex_unlock(&dm->dc_lock);
1025 }
1026 
1027 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1028 {
1029 
1030 	int max_caps = dc->caps.max_links;
1031 	struct vblank_workqueue *vblank_work;
1032 	int i = 0;
1033 
1034 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1035 	if (ZERO_OR_NULL_PTR(vblank_work)) {
1036 		kfree(vblank_work);
1037 		return NULL;
1038 	}
1039 
1040 	for (i = 0; i < max_caps; i++)
1041 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1042 
1043 	return vblank_work;
1044 }
1045 #endif
1046 static int amdgpu_dm_init(struct amdgpu_device *adev)
1047 {
1048 	struct dc_init_data init_data;
1049 #ifdef CONFIG_DRM_AMD_DC_HDCP
1050 	struct dc_callback_init init_params;
1051 #endif
1052 	int r;
1053 
1054 	adev->dm.ddev = adev_to_drm(adev);
1055 	adev->dm.adev = adev;
1056 
1057 	/* Zero all the fields */
1058 	memset(&init_data, 0, sizeof(init_data));
1059 #ifdef CONFIG_DRM_AMD_DC_HDCP
1060 	memset(&init_params, 0, sizeof(init_params));
1061 #endif
1062 
1063 	mutex_init(&adev->dm.dc_lock);
1064 	mutex_init(&adev->dm.audio_lock);
1065 #if defined(CONFIG_DRM_AMD_DC_DCN)
1066 	spin_lock_init(&adev->dm.vblank_lock);
1067 #endif
1068 
1069 	if (amdgpu_dm_irq_init(adev)) {
1070 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1071 		goto error;
1072 	}
1073 
1074 	init_data.asic_id.chip_family = adev->family;
1075 
1076 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1077 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1078 
1079 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1080 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1081 	init_data.asic_id.atombios_base_address =
1082 		adev->mode_info.atom_context->bios;
1083 
1084 	init_data.driver = adev;
1085 
1086 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1087 
1088 	if (!adev->dm.cgs_device) {
1089 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1090 		goto error;
1091 	}
1092 
1093 	init_data.cgs_device = adev->dm.cgs_device;
1094 
1095 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1096 
1097 	switch (adev->asic_type) {
1098 	case CHIP_CARRIZO:
1099 	case CHIP_STONEY:
1100 	case CHIP_RAVEN:
1101 	case CHIP_RENOIR:
1102 		init_data.flags.gpu_vm_support = true;
1103 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1104 			init_data.flags.disable_dmcu = true;
1105 		break;
1106 #if defined(CONFIG_DRM_AMD_DC_DCN)
1107 	case CHIP_VANGOGH:
1108 		init_data.flags.gpu_vm_support = true;
1109 		break;
1110 #endif
1111 	default:
1112 		break;
1113 	}
1114 
1115 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1116 		init_data.flags.fbc_support = true;
1117 
1118 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1119 		init_data.flags.multi_mon_pp_mclk_switch = true;
1120 
1121 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1122 		init_data.flags.disable_fractional_pwm = true;
1123 
1124 	init_data.flags.power_down_display_on_boot = true;
1125 
1126 	INIT_LIST_HEAD(&adev->dm.da_list);
1127 	/* Display Core create. */
1128 	adev->dm.dc = dc_create(&init_data);
1129 
1130 	if (adev->dm.dc) {
1131 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1132 	} else {
1133 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1134 		goto error;
1135 	}
1136 
1137 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1138 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1139 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1140 	}
1141 
1142 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1143 		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1144 
1145 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1146 		adev->dm.dc->debug.disable_stutter = true;
1147 
1148 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1149 		adev->dm.dc->debug.disable_dsc = true;
1150 
1151 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1152 		adev->dm.dc->debug.disable_clock_gate = true;
1153 
1154 	r = dm_dmub_hw_init(adev);
1155 	if (r) {
1156 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1157 		goto error;
1158 	}
1159 
1160 	dc_hardware_init(adev->dm.dc);
1161 
1162 #if defined(CONFIG_DRM_AMD_DC_DCN)
1163 	if (adev->apu_flags) {
1164 		struct dc_phy_addr_space_config pa_config;
1165 
1166 		mmhub_read_system_context(adev, &pa_config);
1167 
1168 		/* Call the DC init_memory func */
1169 		dc_setup_system_context(adev->dm.dc, &pa_config);
1170 	}
1171 #endif
1172 
1173 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1174 	if (!adev->dm.freesync_module) {
1175 		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1176 	} else {
1177 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1178 				 adev->dm.freesync_module);
1179 	}
1180 
1181 	amdgpu_dm_init_color_mod();
1182 
1183 #if defined(CONFIG_DRM_AMD_DC_DCN)
1184 	if (adev->dm.dc->caps.max_links > 0) {
1185 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1186 
1187 		if (!adev->dm.vblank_workqueue)
1188 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1189 		else
1190 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1191 	}
1192 #endif
1193 
1194 #ifdef CONFIG_DRM_AMD_DC_HDCP
1195 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1196 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1197 
1198 		if (!adev->dm.hdcp_workqueue)
1199 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1200 		else
1201 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1202 
1203 		dc_init_callbacks(adev->dm.dc, &init_params);
1204 	}
1205 #endif
1206 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1207 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1208 #endif
1209 	if (amdgpu_dm_initialize_drm_device(adev)) {
1210 		DRM_ERROR(
1211 		"amdgpu: failed to initialize sw for display support.\n");
1212 		goto error;
1213 	}
1214 
1215 	/* create fake encoders for MST */
1216 	dm_dp_create_fake_mst_encoders(adev);
1217 
1218 	/* TODO: Add_display_info? */
1219 
1220 	/* TODO use dynamic cursor width */
1221 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1222 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1223 
1224 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1225 		DRM_ERROR(
1226 			"amdgpu: failed to initialize vblank for display support.\n");
1227 		goto error;
1228 	}
1229 
1230 
1231 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1232 
1233 	return 0;
1234 error:
1235 	amdgpu_dm_fini(adev);
1236 
1237 	return -EINVAL;
1238 }
1239 
1240 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1241 {
1242 	int i;
1243 
1244 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1245 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1246 	}
1247 
1248 	amdgpu_dm_audio_fini(adev);
1249 
1250 	amdgpu_dm_destroy_drm_device(&adev->dm);
1251 
1252 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1253 	if (adev->dm.crc_rd_wrk) {
1254 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1255 		kfree(adev->dm.crc_rd_wrk);
1256 		adev->dm.crc_rd_wrk = NULL;
1257 	}
1258 #endif
1259 #ifdef CONFIG_DRM_AMD_DC_HDCP
1260 	if (adev->dm.hdcp_workqueue) {
1261 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1262 		adev->dm.hdcp_workqueue = NULL;
1263 	}
1264 
1265 	if (adev->dm.dc)
1266 		dc_deinit_callbacks(adev->dm.dc);
1267 #endif
1268 
1269 #if defined(CONFIG_DRM_AMD_DC_DCN)
1270 	if (adev->dm.vblank_workqueue) {
1271 		adev->dm.vblank_workqueue->dm = NULL;
1272 		kfree(adev->dm.vblank_workqueue);
1273 		adev->dm.vblank_workqueue = NULL;
1274 	}
1275 #endif
1276 
1277 	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1278 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1279 		adev->dm.dc->ctx->dmub_srv = NULL;
1280 	}
1281 
1282 	if (adev->dm.dmub_bo)
1283 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1284 				      &adev->dm.dmub_bo_gpu_addr,
1285 				      &adev->dm.dmub_bo_cpu_addr);
1286 
1287 	/* DC Destroy TODO: Replace destroy DAL */
1288 	if (adev->dm.dc)
1289 		dc_destroy(&adev->dm.dc);
1290 	/*
1291 	 * TODO: pageflip, vblank interrupt
1292 	 *
1293 	 * amdgpu_dm_irq_fini(adev);
1294 	 */
1295 
1296 	if (adev->dm.cgs_device) {
1297 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1298 		adev->dm.cgs_device = NULL;
1299 	}
1300 	if (adev->dm.freesync_module) {
1301 		mod_freesync_destroy(adev->dm.freesync_module);
1302 		adev->dm.freesync_module = NULL;
1303 	}
1304 
1305 	mutex_destroy(&adev->dm.audio_lock);
1306 	mutex_destroy(&adev->dm.dc_lock);
1307 
1308 	return;
1309 }
1310 
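/*
 * Request and validate the DMCU eRAM/interrupt-vector firmware for ASICs
 * that still use a DMCU, and register it for PSP loading.
 */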
1311 static int load_dmcu_fw(struct amdgpu_device *adev)
1312 {
1313 	const char *fw_name_dmcu = NULL;
1314 	int r;
1315 	const struct dmcu_firmware_header_v1_0 *hdr;
1316 
1317 	switch (adev->asic_type) {
1318 #if defined(CONFIG_DRM_AMD_DC_SI)
1319 	case CHIP_TAHITI:
1320 	case CHIP_PITCAIRN:
1321 	case CHIP_VERDE:
1322 	case CHIP_OLAND:
1323 #endif
1324 	case CHIP_BONAIRE:
1325 	case CHIP_HAWAII:
1326 	case CHIP_KAVERI:
1327 	case CHIP_KABINI:
1328 	case CHIP_MULLINS:
1329 	case CHIP_TONGA:
1330 	case CHIP_FIJI:
1331 	case CHIP_CARRIZO:
1332 	case CHIP_STONEY:
1333 	case CHIP_POLARIS11:
1334 	case CHIP_POLARIS10:
1335 	case CHIP_POLARIS12:
1336 	case CHIP_VEGAM:
1337 	case CHIP_VEGA10:
1338 	case CHIP_VEGA12:
1339 	case CHIP_VEGA20:
1340 	case CHIP_NAVI10:
1341 	case CHIP_NAVI14:
1342 	case CHIP_RENOIR:
1343 	case CHIP_SIENNA_CICHLID:
1344 	case CHIP_NAVY_FLOUNDER:
1345 	case CHIP_DIMGREY_CAVEFISH:
1346 	case CHIP_VANGOGH:
1347 		return 0;
1348 	case CHIP_NAVI12:
1349 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1350 		break;
1351 	case CHIP_RAVEN:
1352 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1353 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1354 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1355 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1356 		else
1357 			return 0;
1358 		break;
1359 	default:
1360 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1361 		return -EINVAL;
1362 	}
1363 
1364 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1365 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1366 		return 0;
1367 	}
1368 
1369 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1370 	if (r == -ENOENT) {
1371 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1372 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1373 		adev->dm.fw_dmcu = NULL;
1374 		return 0;
1375 	}
1376 	if (r) {
1377 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1378 			fw_name_dmcu);
1379 		return r;
1380 	}
1381 
1382 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1383 	if (r) {
1384 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1385 			fw_name_dmcu);
1386 		release_firmware(adev->dm.fw_dmcu);
1387 		adev->dm.fw_dmcu = NULL;
1388 		return r;
1389 	}
1390 
1391 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1392 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1393 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1394 	adev->firmware.fw_size +=
1395 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1396 
1397 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1398 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1399 	adev->firmware.fw_size +=
1400 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1401 
1402 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1403 
1404 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1405 
1406 	return 0;
1407 }
1408 
1409 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1410 {
1411 	struct amdgpu_device *adev = ctx;
1412 
1413 	return dm_read_reg(adev->dm.dc->ctx, address);
1414 }
1415 
1416 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1417 				     uint32_t value)
1418 {
1419 	struct amdgpu_device *adev = ctx;
1420 
1421 	dm_write_reg(adev->dm.dc->ctx, address, value);
1422 }
1423 
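/**
 * dm_dmub_sw_init() - Software initialization for the DMUB service
 * @adev: amdgpu device
 *
 * Picks and loads the DMUB firmware for the ASIC, creates the dmub_srv
 * instance, sizes its memory regions and backs them with a kernel BO in
 * VRAM. Returns 0 without doing anything on ASICs that lack DMUB.
 */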
1424 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1425 {
1426 	struct dmub_srv_create_params create_params;
1427 	struct dmub_srv_region_params region_params;
1428 	struct dmub_srv_region_info region_info;
1429 	struct dmub_srv_fb_params fb_params;
1430 	struct dmub_srv_fb_info *fb_info;
1431 	struct dmub_srv *dmub_srv;
1432 	const struct dmcub_firmware_header_v1_0 *hdr;
1433 	const char *fw_name_dmub;
1434 	enum dmub_asic dmub_asic;
1435 	enum dmub_status status;
1436 	int r;
1437 
1438 	switch (adev->asic_type) {
1439 	case CHIP_RENOIR:
1440 		dmub_asic = DMUB_ASIC_DCN21;
1441 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1442 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1443 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1444 		break;
1445 	case CHIP_SIENNA_CICHLID:
1446 		dmub_asic = DMUB_ASIC_DCN30;
1447 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1448 		break;
1449 	case CHIP_NAVY_FLOUNDER:
1450 		dmub_asic = DMUB_ASIC_DCN30;
1451 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1452 		break;
1453 	case CHIP_VANGOGH:
1454 		dmub_asic = DMUB_ASIC_DCN301;
1455 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1456 		break;
1457 	case CHIP_DIMGREY_CAVEFISH:
1458 		dmub_asic = DMUB_ASIC_DCN302;
1459 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1460 		break;
1461 
1462 	default:
1463 		/* ASIC doesn't support DMUB. */
1464 		return 0;
1465 	}
1466 
1467 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1468 	if (r) {
1469 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1470 		return 0;
1471 	}
1472 
1473 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1474 	if (r) {
1475 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1476 		return 0;
1477 	}
1478 
1479 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1480 
1481 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1482 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1483 			AMDGPU_UCODE_ID_DMCUB;
1484 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1485 			adev->dm.dmub_fw;
1486 		adev->firmware.fw_size +=
1487 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1488 
1489 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1490 			 adev->dm.dmcub_fw_version);
1491 	}
1492 
1493 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1494 
1495 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1496 	dmub_srv = adev->dm.dmub_srv;
1497 
1498 	if (!dmub_srv) {
1499 		DRM_ERROR("Failed to allocate DMUB service!\n");
1500 		return -ENOMEM;
1501 	}
1502 
1503 	memset(&create_params, 0, sizeof(create_params));
1504 	create_params.user_ctx = adev;
1505 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1506 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1507 	create_params.asic = dmub_asic;
1508 
1509 	/* Create the DMUB service. */
1510 	status = dmub_srv_create(dmub_srv, &create_params);
1511 	if (status != DMUB_STATUS_OK) {
1512 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1513 		return -EINVAL;
1514 	}
1515 
1516 	/* Calculate the size of all the regions for the DMUB service. */
1517 	memset(&region_params, 0, sizeof(region_params));
1518 
1519 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1520 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1521 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1522 	region_params.vbios_size = adev->bios_size;
1523 	region_params.fw_bss_data = region_params.bss_data_size ?
1524 		adev->dm.dmub_fw->data +
1525 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1526 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1527 	region_params.fw_inst_const =
1528 		adev->dm.dmub_fw->data +
1529 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1530 		PSP_HEADER_BYTES;
1531 
1532 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1533 					   &region_info);
1534 
1535 	if (status != DMUB_STATUS_OK) {
1536 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1537 		return -EINVAL;
1538 	}
1539 
1540 	/*
1541 	 * Allocate a framebuffer based on the total size of all the regions.
1542 	 * TODO: Move this into GART.
1543 	 */
1544 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1545 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1546 				    &adev->dm.dmub_bo_gpu_addr,
1547 				    &adev->dm.dmub_bo_cpu_addr);
1548 	if (r)
1549 		return r;
1550 
1551 	/* Rebase the regions on the framebuffer address. */
1552 	memset(&fb_params, 0, sizeof(fb_params));
1553 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1554 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1555 	fb_params.region_info = &region_info;
1556 
1557 	adev->dm.dmub_fb_info =
1558 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1559 	fb_info = adev->dm.dmub_fb_info;
1560 
1561 	if (!fb_info) {
1562 		DRM_ERROR(
1563 			"Failed to allocate framebuffer info for DMUB service!\n");
1564 		return -ENOMEM;
1565 	}
1566 
1567 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1568 	if (status != DMUB_STATUS_OK) {
1569 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1570 		return -EINVAL;
1571 	}
1572 
1573 	return 0;
1574 }
1575 
1576 static int dm_sw_init(void *handle)
1577 {
1578 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1579 	int r;
1580 
1581 	r = dm_dmub_sw_init(adev);
1582 	if (r)
1583 		return r;
1584 
1585 	return load_dmcu_fw(adev);
1586 }
1587 
1588 static int dm_sw_fini(void *handle)
1589 {
1590 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1591 
1592 	kfree(adev->dm.dmub_fb_info);
1593 	adev->dm.dmub_fb_info = NULL;
1594 
1595 	if (adev->dm.dmub_srv) {
1596 		dmub_srv_destroy(adev->dm.dmub_srv);
1597 		adev->dm.dmub_srv = NULL;
1598 	}
1599 
1600 	release_firmware(adev->dm.dmub_fw);
1601 	adev->dm.dmub_fw = NULL;
1602 
1603 	release_firmware(adev->dm.fw_dmcu);
1604 	adev->dm.fw_dmcu = NULL;
1605 
1606 	return 0;
1607 }
1608 
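/*
 * Start DP MST topology management on every connector that detected an
 * MST branch device; demote the link to a single display on failure.
 */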
1609 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1610 {
1611 	struct amdgpu_dm_connector *aconnector;
1612 	struct drm_connector *connector;
1613 	struct drm_connector_list_iter iter;
1614 	int ret = 0;
1615 
1616 	drm_connector_list_iter_begin(dev, &iter);
1617 	drm_for_each_connector_iter(connector, &iter) {
1618 		aconnector = to_amdgpu_dm_connector(connector);
1619 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1620 		    aconnector->mst_mgr.aux) {
1621 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1622 					 aconnector,
1623 					 aconnector->base.base.id);
1624 
1625 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1626 			if (ret < 0) {
1627 				DRM_ERROR("DM_MST: Failed to start MST\n");
1628 				aconnector->dc_link->type =
1629 					dc_connection_single;
1630 				break;
1631 			}
1632 		}
1633 	}
1634 	drm_connector_list_iter_end(&iter);
1635 
1636 	return ret;
1637 }
1638 
1639 static int dm_late_init(void *handle)
1640 {
1641 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1642 
1643 	struct dmcu_iram_parameters params;
1644 	unsigned int linear_lut[16];
1645 	int i;
1646 	struct dmcu *dmcu = NULL;
1647 	bool ret = true;
1648 
1649 	dmcu = adev->dm.dc->res_pool->dmcu;
1650 
1651 	for (i = 0; i < 16; i++)
1652 		linear_lut[i] = 0xFFFF * i / 15;
1653 
1654 	params.set = 0;
1655 	params.backlight_ramping_start = 0xCCCC;
1656 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1657 	params.backlight_lut_array_size = 16;
1658 	params.backlight_lut_array = linear_lut;
1659 
1660 	/* Min backlight level after ABM reduction; don't allow below 1%:
1661 	 * 0xFFFF x 0.01 = 0x28F
1662 	 */
1663 	params.min_abm_backlight = 0x28F;
1664 
1665 	/* In the case where ABM is implemented on dmcub, the
1666 	 * dmcu object will be NULL.
1667 	 * ABM 2.4 and up are implemented on dmcub.
1668 	 */
1669 	if (dmcu)
1670 		ret = dmcu_load_iram(dmcu, params);
1671 	else if (adev->dm.dc->ctx->dmub_srv)
1672 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1673 
1674 	if (!ret)
1675 		return -EINVAL;
1676 
1677 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1678 }
1679 
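/*
 * Suspend or resume the DP MST topology managers of all root MST
 * connectors. On a failed resume, tear MST down on that link and
 * schedule a hotplug event so userspace can re-probe.
 */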
1680 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1681 {
1682 	struct amdgpu_dm_connector *aconnector;
1683 	struct drm_connector *connector;
1684 	struct drm_connector_list_iter iter;
1685 	struct drm_dp_mst_topology_mgr *mgr;
1686 	int ret;
1687 	bool need_hotplug = false;
1688 
1689 	drm_connector_list_iter_begin(dev, &iter);
1690 	drm_for_each_connector_iter(connector, &iter) {
1691 		aconnector = to_amdgpu_dm_connector(connector);
1692 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1693 		    aconnector->mst_port)
1694 			continue;
1695 
1696 		mgr = &aconnector->mst_mgr;
1697 
1698 		if (suspend) {
1699 			drm_dp_mst_topology_mgr_suspend(mgr);
1700 		} else {
1701 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1702 			if (ret < 0) {
1703 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1704 				need_hotplug = true;
1705 			}
1706 		}
1707 	}
1708 	drm_connector_list_iter_end(&iter);
1709 
1710 	if (need_hotplug)
1711 		drm_kms_helper_hotplug_event(dev);
1712 }
1713 
1714 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1715 {
1716 	struct smu_context *smu = &adev->smu;
1717 	int ret = 0;
1718 
1719 	if (!is_support_sw_smu(adev))
1720 		return 0;
1721 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The
	 * settings should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, then calls the pplib functions
	 * below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
	 */
	switch (adev->asic_type) {
1753 	case CHIP_NAVI10:
1754 	case CHIP_NAVI14:
1755 	case CHIP_NAVI12:
1756 		break;
1757 	default:
1758 		return 0;
1759 	}
1760 
1761 	ret = smu_write_watermarks_table(smu);
1762 	if (ret) {
1763 		DRM_ERROR("Failed to update WMTABLE!\n");
1764 		return ret;
1765 	}
1766 
1767 	return 0;
1768 }
1769 
1770 /**
1771  * dm_hw_init() - Initialize DC device
1772  * @handle: The base driver device containing the amdgpu_dm device.
1773  *
1774  * Initialize the &struct amdgpu_display_manager device. This involves calling
1775  * the initializers of each DM component, then populating the struct with them.
1776  *
1777  * Although the function implies hardware initialization, both hardware and
1778  * software are initialized here. Splitting them out to their relevant init
1779  * hooks is a future TODO item.
1780  *
1781  * Some notable things that are initialized here:
1782  *
1783  * - Display Core, both software and hardware
1784  * - DC modules that we need (freesync and color management)
1785  * - DRM software states
1786  * - Interrupt sources and handlers
1787  * - Vblank support
1788  * - Debug FS entries, if enabled
1789  */
1790 static int dm_hw_init(void *handle)
1791 {
1792 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1793 	/* Create DAL display manager */
1794 	amdgpu_dm_init(adev);
1795 	amdgpu_dm_hpd_init(adev);
1796 
1797 	return 0;
1798 }
1799 
1800 /**
1801  * dm_hw_fini() - Teardown DC device
1802  * @handle: The base driver device containing the amdgpu_dm device.
1803  *
1804  * Teardown components within &struct amdgpu_display_manager that require
1805  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1806  * were loaded. Also flush IRQ workqueues and disable them.
1807  */
1808 static int dm_hw_fini(void *handle)
1809 {
1810 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1811 
1812 	amdgpu_dm_hpd_fini(adev);
1813 
1814 	amdgpu_dm_irq_fini(adev);
1815 	amdgpu_dm_fini(adev);
1816 	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
1821 static void dm_disable_vblank(struct drm_crtc *crtc);
1822 
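/*
 * Enable or disable the pflip and vblank interrupt sources for every stream
 * in @state that still has planes. Used around GPU reset to quiesce and
 * later restore display interrupts.
 */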
1823 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1824 				 struct dc_state *state, bool enable)
1825 {
1826 	enum dc_irq_source irq_source;
1827 	struct amdgpu_crtc *acrtc;
1828 	int rc = -EBUSY;
1829 	int i = 0;
1830 
1831 	for (i = 0; i < state->stream_count; i++) {
1832 		acrtc = get_crtc_by_otg_inst(
1833 				adev, state->stream_status[i].primary_otg_inst);
1834 
1835 		if (acrtc && state->stream_status[i].plane_count != 0) {
1836 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1837 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1840 			if (rc)
1841 				DRM_WARN("Failed to %s pflip interrupts\n",
1842 					 enable ? "enable" : "disable");
1843 
1844 			if (enable) {
1845 				rc = dm_enable_vblank(&acrtc->base);
1846 				if (rc)
1847 					DRM_WARN("Failed to enable vblank interrupts\n");
1848 			} else {
1849 				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}
1856 
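/*
 * Build a copy of the current DC state with every stream and its planes
 * removed, validate it and commit it, leaving the hardware driving zero
 * streams. Used on suspend while a GPU reset is in progress.
 */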
1857 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1858 {
1859 	struct dc_state *context = NULL;
1860 	enum dc_status res = DC_ERROR_UNEXPECTED;
1861 	int i;
1862 	struct dc_stream_state *del_streams[MAX_PIPES];
1863 	int del_streams_count = 0;
1864 
1865 	memset(del_streams, 0, sizeof(del_streams));
1866 
1867 	context = dc_create_state(dc);
1868 	if (context == NULL)
1869 		goto context_alloc_fail;
1870 
1871 	dc_resource_state_copy_construct_current(dc, context);
1872 
1873 	/* First remove from context all streams */
1874 	for (i = 0; i < context->stream_count; i++) {
1875 		struct dc_stream_state *stream = context->streams[i];
1876 
1877 		del_streams[del_streams_count++] = stream;
1878 	}
1879 
1880 	/* Remove all planes for removed streams and then remove the streams */
1881 	for (i = 0; i < del_streams_count; i++) {
1882 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1883 			res = DC_FAIL_DETACH_SURFACES;
1884 			goto fail;
1885 		}
1886 
1887 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1888 		if (res != DC_OK)
1889 			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);
1894 
1895 	if (res != DC_OK) {
		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1897 		goto fail;
1898 	}
1899 
1900 	res = dc_commit_state(dc, context);
1901 
1902 fail:
1903 	dc_release_state(context);
1904 
1905 context_alloc_fail:
1906 	return res;
1907 }
1908 
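/*
 * Suspend hook. During GPU reset, cache the current DC state, disable the
 * per-stream interrupts and commit zero streams; otherwise suspend the DRM
 * atomic state, the MST topologies and the IRQs, then put DC into D3.
 */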
1909 static int dm_suspend(void *handle)
1910 {
1911 	struct amdgpu_device *adev = handle;
1912 	struct amdgpu_display_manager *dm = &adev->dm;
1913 	int ret = 0;
1914 
1915 	if (amdgpu_in_reset(adev)) {
1916 		mutex_lock(&dm->dc_lock);
1917 
1918 #if defined(CONFIG_DRM_AMD_DC_DCN)
1919 		dc_allow_idle_optimizations(adev->dm.dc, false);
1920 #endif
1921 
1922 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1923 
1924 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1925 
1926 		amdgpu_dm_commit_zero_streams(dm->dc);
1927 
1928 		amdgpu_dm_irq_suspend(adev);
1929 
1930 		return ret;
1931 	}
1932 
1933 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1934 	amdgpu_dm_crtc_secure_display_suspend(adev);
1935 #endif
1936 	WARN_ON(adev->dm.cached_state);
1937 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1938 
1939 	s3_handle_mst(adev_to_drm(adev), true);
1940 
	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1945 
1946 	return 0;
1947 }
1948 
1949 static struct amdgpu_dm_connector *
1950 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1951 					     struct drm_crtc *crtc)
1952 {
1953 	uint32_t i;
1954 	struct drm_connector_state *new_con_state;
1955 	struct drm_connector *connector;
1956 	struct drm_crtc *crtc_from_state;
1957 
1958 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1959 		crtc_from_state = new_con_state->crtc;
1960 
1961 		if (crtc_from_state == crtc)
1962 			return to_amdgpu_dm_connector(connector);
1963 	}
1964 
1965 	return NULL;
1966 }
1967 
1968 static void emulated_link_detect(struct dc_link *link)
1969 {
1970 	struct dc_sink_init_data sink_init_data = { 0 };
1971 	struct display_sink_capability sink_caps = { 0 };
1972 	enum dc_edid_status edid_status;
1973 	struct dc_context *dc_ctx = link->ctx;
1974 	struct dc_sink *sink = NULL;
1975 	struct dc_sink *prev_sink = NULL;
1976 
1977 	link->type = dc_connection_none;
1978 	prev_sink = link->local_sink;
1979 
1980 	if (prev_sink)
1981 		dc_sink_release(prev_sink);
1982 
1983 	switch (link->connector_signal) {
1984 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1985 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1986 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1987 		break;
1988 	}
1989 
1990 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1991 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1992 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1993 		break;
1994 	}
1995 
1996 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1997 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1998 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1999 		break;
2000 	}
2001 
2002 	case SIGNAL_TYPE_LVDS: {
2003 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2004 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2005 		break;
2006 	}
2007 
2008 	case SIGNAL_TYPE_EDP: {
2009 		sink_caps.transaction_type =
2010 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2011 		sink_caps.signal = SIGNAL_TYPE_EDP;
2012 		break;
2013 	}
2014 
2015 	case SIGNAL_TYPE_DISPLAY_PORT: {
2016 		sink_caps.transaction_type =
2017 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2018 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2019 		break;
2020 	}
2021 
2022 	default:
2023 		DC_ERROR("Invalid connector type! signal:%d\n",
2024 			link->connector_signal);
2025 		return;
2026 	}
2027 
2028 	sink_init_data.link = link;
2029 	sink_init_data.sink_signal = sink_caps.signal;
2030 
2031 	sink = dc_sink_create(&sink_init_data);
2032 	if (!sink) {
2033 		DC_ERROR("Failed to create sink!\n");
2034 		return;
2035 	}
2036 
2037 	/* dc_sink_create returns a new reference */
2038 	link->local_sink = sink;
2039 
2040 	edid_status = dm_helpers_read_local_edid(
2041 			link->ctx,
2042 			link,
2043 			sink);
2044 
	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}
2049 
2050 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2051 				     struct amdgpu_display_manager *dm)
2052 {
2053 	struct {
2054 		struct dc_surface_update surface_updates[MAX_SURFACES];
2055 		struct dc_plane_info plane_infos[MAX_SURFACES];
2056 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2057 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2058 		struct dc_stream_update stream_update;
	} *bundle;
2060 	int k, m;
2061 
2062 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2063 
2064 	if (!bundle) {
2065 		dm_error("Failed to allocate update bundle\n");
2066 		goto cleanup;
2067 	}
2068 
2069 	for (k = 0; k < dc_state->stream_count; k++) {
2070 		bundle->stream_update.stream = dc_state->streams[k];
2071 
2072 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2073 			bundle->surface_updates[m].surface =
2074 				dc_state->stream_status->plane_states[m];
2075 			bundle->surface_updates[m].surface->force_full_update =
2076 				true;
2077 		}
2078 		dc_commit_updates_for_stream(
2079 			dm->dc, bundle->surface_updates,
2080 			dc_state->stream_status->plane_count,
2081 			dc_state->streams[k], &bundle->stream_update, dc_state);
2082 	}
2083 
cleanup:
	kfree(bundle);
}
2089 
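/*
 * Find the stream currently driven by @link and commit a stream update with
 * dpms_off set, blanking the output without a full modeset.
 */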
2090 static void dm_set_dpms_off(struct dc_link *link)
2091 {
2092 	struct dc_stream_state *stream_state;
2093 	struct amdgpu_dm_connector *aconnector = link->priv;
2094 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2095 	struct dc_stream_update stream_update;
2096 	bool dpms_off = true;
2097 
2098 	memset(&stream_update, 0, sizeof(stream_update));
2099 	stream_update.dpms_off = &dpms_off;
2100 
2101 	mutex_lock(&adev->dm.dc_lock);
2102 	stream_state = dc_stream_find_from_link(link);
2103 
2104 	if (stream_state == NULL) {
2105 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2106 		mutex_unlock(&adev->dm.dc_lock);
2107 		return;
2108 	}
2109 
2110 	stream_update.stream = stream_state;
2111 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2112 				     stream_state, &stream_update,
2113 				     stream_state->ctx->dc->current_state);
2114 	mutex_unlock(&adev->dm.dc_lock);
2115 }
2116 
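/*
 * Resume hook. During GPU reset, re-initialize DMUB and recommit the cached
 * DC state; otherwise rebuild the DC state from scratch, re-run detection on
 * every link and restore the DRM atomic state cached at suspend time.
 */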
2117 static int dm_resume(void *handle)
2118 {
2119 	struct amdgpu_device *adev = handle;
2120 	struct drm_device *ddev = adev_to_drm(adev);
2121 	struct amdgpu_display_manager *dm = &adev->dm;
2122 	struct amdgpu_dm_connector *aconnector;
2123 	struct drm_connector *connector;
2124 	struct drm_connector_list_iter iter;
2125 	struct drm_crtc *crtc;
2126 	struct drm_crtc_state *new_crtc_state;
2127 	struct dm_crtc_state *dm_new_crtc_state;
2128 	struct drm_plane *plane;
2129 	struct drm_plane_state *new_plane_state;
2130 	struct dm_plane_state *dm_new_plane_state;
2131 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2132 	enum dc_connection_type new_connection_type = dc_connection_none;
2133 	struct dc_state *dc_state;
2134 	int i, r, j;
2135 
2136 	if (amdgpu_in_reset(adev)) {
2137 		dc_state = dm->cached_dc_state;
2138 
2139 		r = dm_dmub_hw_init(adev);
2140 		if (r)
2141 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2142 
2143 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2144 		dc_resume(dm->dc);
2145 
2146 		amdgpu_dm_irq_resume_early(adev);
2147 
2148 		for (i = 0; i < dc_state->stream_count; i++) {
2149 			dc_state->streams[i]->mode_changed = true;
2150 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2151 				dc_state->stream_status->plane_states[j]->update_flags.raw
2152 					= 0xffffffff;
2153 			}
2154 		}
2155 
2156 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2157 
2158 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2159 
2160 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2161 
2162 		dc_release_state(dm->cached_dc_state);
2163 		dm->cached_dc_state = NULL;
2164 
2165 		amdgpu_dm_irq_resume_late(adev);
2166 
2167 		mutex_unlock(&dm->dc_lock);
2168 
2169 		return 0;
	}

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2172 	dc_release_state(dm_state->context);
2173 	dm_state->context = dc_create_state(dm->dc);
2174 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2175 	dc_resource_state_construct(dm->dc, dm_state->context);
2176 
2177 	/* Before powering on DC we need to re-initialize DMUB. */
2178 	r = dm_dmub_hw_init(adev);
2179 	if (r)
2180 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2181 
2182 	/* power on hardware */
2183 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2184 
2185 	/* program HPD filter */
2186 	dc_resume(dm->dc);
2187 
2188 	/*
2189 	 * early enable HPD Rx IRQ, should be done before set mode as short
2190 	 * pulse interrupts are used for MST
2191 	 */
2192 	amdgpu_dm_irq_resume_early(adev);
2193 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2195 	s3_handle_mst(ddev, false);
2196 
	/* Do detection */
2198 	drm_connector_list_iter_begin(ddev, &iter);
2199 	drm_for_each_connector_iter(connector, &iter) {
2200 		aconnector = to_amdgpu_dm_connector(connector);
2201 
		/*
		 * This is the case when traversing through already created
		 * MST connectors; they should be skipped.
		 */
2206 		if (aconnector->mst_port)
2207 			continue;
2208 
2209 		mutex_lock(&aconnector->hpd_lock);
2210 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2211 			DRM_ERROR("KMS: Failed to detect connector\n");
2212 
2213 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2214 			emulated_link_detect(aconnector->dc_link);
2215 		else
2216 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2217 
2218 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2219 			aconnector->fake_enable = false;
2220 
2221 		if (aconnector->dc_sink)
2222 			dc_sink_release(aconnector->dc_sink);
2223 		aconnector->dc_sink = NULL;
2224 		amdgpu_dm_update_connector_after_detect(aconnector);
2225 		mutex_unlock(&aconnector->hpd_lock);
2226 	}
2227 	drm_connector_list_iter_end(&iter);
2228 
2229 	/* Force mode set in atomic commit */
2230 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2231 		new_crtc_state->active_changed = true;
2232 
2233 	/*
2234 	 * atomic_check is expected to create the dc states. We need to release
2235 	 * them here, since they were duplicated as part of the suspend
2236 	 * procedure.
2237 	 */
2238 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2239 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2240 		if (dm_new_crtc_state->stream) {
2241 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2242 			dc_stream_release(dm_new_crtc_state->stream);
2243 			dm_new_crtc_state->stream = NULL;
2244 		}
2245 	}
2246 
2247 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2248 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2249 		if (dm_new_plane_state->dc_state) {
2250 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2251 			dc_plane_state_release(dm_new_plane_state->dc_state);
2252 			dm_new_plane_state->dc_state = NULL;
2253 		}
2254 	}
2255 
2256 	drm_atomic_helper_resume(ddev, dm->cached_state);
2257 
2258 	dm->cached_state = NULL;
2259 
2260 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2261 	amdgpu_dm_crtc_secure_display_resume(adev);
2262 #endif
2263 
2264 	amdgpu_dm_irq_resume_late(adev);
2265 
2266 	amdgpu_dm_smu_write_watermarks_table(adev);
2267 
2268 	return 0;
2269 }
2270 
2271 /**
2272  * DOC: DM Lifecycle
2273  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2275  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2276  * the base driver's device list to be initialized and torn down accordingly.
2277  *
2278  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2279  */
2280 
2281 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2282 	.name = "dm",
2283 	.early_init = dm_early_init,
2284 	.late_init = dm_late_init,
2285 	.sw_init = dm_sw_init,
2286 	.sw_fini = dm_sw_fini,
2287 	.hw_init = dm_hw_init,
2288 	.hw_fini = dm_hw_fini,
2289 	.suspend = dm_suspend,
2290 	.resume = dm_resume,
2291 	.is_idle = dm_is_idle,
2292 	.wait_for_idle = dm_wait_for_idle,
2293 	.check_soft_reset = dm_check_soft_reset,
2294 	.soft_reset = dm_soft_reset,
2295 	.set_clockgating_state = dm_set_clockgating_state,
2296 	.set_powergating_state = dm_set_powergating_state,
2297 };
2298 
const struct amdgpu_ip_block_version dm_ip_block = {
2301 	.type = AMD_IP_BLOCK_TYPE_DCE,
2302 	.major = 1,
2303 	.minor = 0,
2304 	.rev = 0,
2305 	.funcs = &amdgpu_dm_funcs,
};

/**
2310  * DOC: atomic
2311  *
2312  * *WIP*
2313  */
2314 
2315 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2316 	.fb_create = amdgpu_display_user_framebuffer_create,
2317 	.get_format_info = amd_get_format_info,
2318 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2319 	.atomic_check = amdgpu_dm_atomic_check,
2320 	.atomic_commit = drm_atomic_helper_commit,
2321 };
2322 
2323 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2324 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2325 };
2326 
2327 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2328 {
2329 	u32 max_cll, min_cll, max, min, q, r;
2330 	struct amdgpu_dm_backlight_caps *caps;
2331 	struct amdgpu_display_manager *dm;
2332 	struct drm_connector *conn_base;
2333 	struct amdgpu_device *adev;
2334 	struct dc_link *link = NULL;
2335 	static const u8 pre_computed_values[] = {
2336 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2337 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2338 
2339 	if (!aconnector || !aconnector->dc_link)
2340 		return;
2341 
2342 	link = aconnector->dc_link;
2343 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2344 		return;
2345 
2346 	conn_base = &aconnector->base;
2347 	adev = drm_to_adev(conn_base->dev);
2348 	dm = &adev->dm;
2349 	caps = &dm->backlight_caps;
2350 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2351 	caps->aux_support = false;
2352 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2353 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2354 
2355 	if (caps->ext_caps->bits.oled == 1 ||
2356 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2357 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2358 		caps->aux_support = true;
2359 
2360 	if (amdgpu_backlight == 0)
2361 		caps->aux_support = false;
2362 	else if (amdgpu_backlight == 1)
2363 		caps->aux_support = true;
2364 
	/* From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 *	Luminance = 50 * 2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would require floating point
	 * precision; to avoid that complexity, we exploit the fact that CV is
	 * divided by a constant. By Euclid's division algorithm, CV can be
	 * written as CV = 32*q + r. Substituting this into the Luminance
	 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
	 * pre-compute 50*2**(r/32) for r in 0..31. The values were generated
	 * with the following Ruby one-liner:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and can be verified against pre_computed_values above.
	 */
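	/* For example, max_cll = 100 gives q = 3 and r = 4, so
	 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440 nits.
	 */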
2380 	q = max_cll >> 5;
2381 	r = max_cll % 32;
2382 	max = (1 << q) * pre_computed_values[r];
2383 
2384 	// min luminance: maxLum * (CV/255)^2 / 100
2385 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2386 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2387 
2388 	caps->aux_max_input_signal = max;
2389 	caps->aux_min_input_signal = min;
2390 }
2391 
2392 void amdgpu_dm_update_connector_after_detect(
2393 		struct amdgpu_dm_connector *aconnector)
2394 {
2395 	struct drm_connector *connector = &aconnector->base;
2396 	struct drm_device *dev = connector->dev;
2397 	struct dc_sink *sink;
2398 
2399 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2401 		return;
2402 
2403 	sink = aconnector->dc_link->local_sink;
2404 	if (sink)
2405 		dc_sink_retain(sink);
2406 
	/*
	 * An EDID-managed connector gets its first update only in the
	 * mode_valid hook; the connector sink is then set to either a fake or
	 * a physical sink depending on link status.
	 * Skip if already done during boot.
	 */
2412 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2413 			&& aconnector->dc_em_sink) {
2414 
		/*
		 * For headless S3 resume, use the emulated sink (dc_em_sink)
		 * to fake a stream, because on resume connector->sink is set
		 * to NULL.
		 */
2419 		mutex_lock(&dev->mode_config.mutex);
2420 
2421 		if (sink) {
2422 			if (aconnector->dc_sink) {
2423 				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump up the
				 * sink refcount, because the link no longer
				 * points to it after disconnect; otherwise the
				 * next crtc-to-connector reshuffle by UMD
				 * would trigger an unwanted dc_sink release.
				 */
2430 				dc_sink_release(aconnector->dc_sink);
2431 			}
2432 			aconnector->dc_sink = sink;
2433 			dc_sink_retain(aconnector->dc_sink);
2434 			amdgpu_dm_update_freesync_caps(connector,
2435 					aconnector->edid);
2436 		} else {
2437 			amdgpu_dm_update_freesync_caps(connector, NULL);
2438 			if (!aconnector->dc_sink) {
2439 				aconnector->dc_sink = aconnector->dc_em_sink;
2440 				dc_sink_retain(aconnector->dc_sink);
2441 			}
2442 		}
2443 
2444 		mutex_unlock(&dev->mode_config.mutex);
2445 
2446 		if (sink)
2447 			dc_sink_release(sink);
2448 		return;
2449 	}
2450 
2451 	/*
2452 	 * TODO: temporary guard to look for proper fix
2453 	 * if this sink is MST sink, we should not do anything
2454 	 */
2455 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2456 		dc_sink_release(sink);
2457 		return;
2458 	}
2459 
2460 	if (aconnector->dc_sink == sink) {
2461 		/*
2462 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2463 		 * Do nothing!!
2464 		 */
2465 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2466 				aconnector->connector_id);
2467 		if (sink)
2468 			dc_sink_release(sink);
2469 		return;
2470 	}
2471 
2472 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2473 		aconnector->connector_id, aconnector->dc_sink, sink);
2474 
2475 	mutex_lock(&dev->mode_config.mutex);
2476 
2477 	/*
2478 	 * 1. Update status of the drm connector
2479 	 * 2. Send an event and let userspace tell us what to do
2480 	 */
2481 	if (sink) {
2482 		/*
2483 		 * TODO: check if we still need the S3 mode update workaround.
2484 		 * If yes, put it here.
2485 		 */
2486 		if (aconnector->dc_sink) {
2487 			amdgpu_dm_update_freesync_caps(connector, NULL);
2488 			dc_sink_release(aconnector->dc_sink);
2489 		}
2490 
2491 		aconnector->dc_sink = sink;
2492 		dc_sink_retain(aconnector->dc_sink);
2493 		if (sink->dc_edid.length == 0) {
2494 			aconnector->edid = NULL;
2495 			if (aconnector->dc_link->aux_mode) {
2496 				drm_dp_cec_unset_edid(
2497 					&aconnector->dm_dp_aux.aux);
2498 			}
2499 		} else {
2500 			aconnector->edid =
2501 				(struct edid *)sink->dc_edid.raw_edid;
2502 
2503 			drm_connector_update_edid_property(connector,
2504 							   aconnector->edid);
2505 			if (aconnector->dc_link->aux_mode)
2506 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2507 						    aconnector->edid);
2508 		}
2509 
2510 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2511 		update_connector_ext_caps(aconnector);
2512 	} else {
2513 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2514 		amdgpu_dm_update_freesync_caps(connector, NULL);
2515 		drm_connector_update_edid_property(connector, NULL);
2516 		aconnector->num_modes = 0;
2517 		dc_sink_release(aconnector->dc_sink);
2518 		aconnector->dc_sink = NULL;
2519 		aconnector->edid = NULL;
2520 #ifdef CONFIG_DRM_AMD_DC_HDCP
2521 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2522 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2523 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2524 #endif
2525 	}
2526 
2527 	mutex_unlock(&dev->mode_config.mutex);
2528 
2529 	update_subconnector_property(aconnector);
2530 
2531 	if (sink)
2532 		dc_sink_release(sink);
2533 }
2534 
2535 static void handle_hpd_irq(void *param)
2536 {
2537 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2538 	struct drm_connector *connector = &aconnector->base;
2539 	struct drm_device *dev = connector->dev;
2540 	enum dc_connection_type new_connection_type = dc_connection_none;
2541 #ifdef CONFIG_DRM_AMD_DC_HDCP
2542 	struct amdgpu_device *adev = drm_to_adev(dev);
2543 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2544 #endif
2545 
	/*
	 * In case of failure, or for the MST case, there is no need to update
	 * the connector status or notify the OS, since MST does this in its
	 * own context.
	 */
2550 	mutex_lock(&aconnector->hpd_lock);
2551 
2552 #ifdef CONFIG_DRM_AMD_DC_HDCP
2553 	if (adev->dm.hdcp_workqueue) {
2554 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2555 		dm_con_state->update_hdcp = true;
2556 	}
2557 #endif
2558 	if (aconnector->fake_enable)
2559 		aconnector->fake_enable = false;
2560 
2561 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2562 		DRM_ERROR("KMS: Failed to detect connector\n");
2563 
2564 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
2569 		dm_restore_drm_connector_state(dev, connector);
2570 		drm_modeset_unlock_all(dev);
2571 
2572 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2573 			drm_kms_helper_hotplug_event(dev);
2574 
2575 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2576 		if (new_connection_type == dc_connection_none &&
2577 		    aconnector->dc_link->type == dc_connection_none)
2578 			dm_set_dpms_off(aconnector->dc_link);
2579 
2580 		amdgpu_dm_update_connector_after_detect(aconnector);
2581 
2582 		drm_modeset_lock_all(dev);
2583 		dm_restore_drm_connector_state(dev, connector);
2584 		drm_modeset_unlock_all(dev);
2585 
2586 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2587 			drm_kms_helper_hotplug_event(dev);
2588 	}
	mutex_unlock(&aconnector->hpd_lock);
}
2592 
2593 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2594 {
2595 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2596 	uint8_t dret;
2597 	bool new_irq_handled = false;
2598 	int dpcd_addr;
2599 	int dpcd_bytes_to_read;
2600 
2601 	const int max_process_count = 30;
2602 	int process_count = 0;
2603 
2604 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2605 
2606 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2607 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2608 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2609 		dpcd_addr = DP_SINK_COUNT;
2610 	} else {
2611 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2612 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2613 		dpcd_addr = DP_SINK_COUNT_ESI;
2614 	}
2615 
2616 	dret = drm_dp_dpcd_read(
2617 		&aconnector->dm_dp_aux.aux,
2618 		dpcd_addr,
2619 		esi,
2620 		dpcd_bytes_to_read);
2621 
2622 	while (dret == dpcd_bytes_to_read &&
2623 		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
2626 
2627 		process_count++;
2628 
2629 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2630 		/* handle HPD short pulse irq */
2631 		if (aconnector->mst_mgr.mst_state)
2632 			drm_dp_mst_hpd_irq(
2633 				&aconnector->mst_mgr,
2634 				esi,
2635 				&new_irq_handled);
2636 
2637 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
2639 			const int ack_dpcd_bytes_to_write =
2640 				dpcd_bytes_to_read - 1;
2641 
2642 			for (retry = 0; retry < 3; retry++) {
2643 				uint8_t wret;
2644 
2645 				wret = drm_dp_dpcd_write(
2646 					&aconnector->dm_dp_aux.aux,
2647 					dpcd_addr + 1,
2648 					&esi[1],
2649 					ack_dpcd_bytes_to_write);
2650 				if (wret == ack_dpcd_bytes_to_write)
2651 					break;
2652 			}
2653 
2654 			/* check if there is new irq to be handled */
2655 			dret = drm_dp_dpcd_read(
2656 				&aconnector->dm_dp_aux.aux,
2657 				dpcd_addr,
2658 				esi,
2659 				dpcd_bytes_to_read);
2660 
2661 			new_irq_handled = false;
2662 		} else {
2663 			break;
2664 		}
2665 	}
2666 
2667 	if (process_count == max_process_count)
2668 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2669 }
2670 
2671 static void handle_hpd_rx_irq(void *param)
2672 {
2673 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2674 	struct drm_connector *connector = &aconnector->base;
2675 	struct drm_device *dev = connector->dev;
2676 	struct dc_link *dc_link = aconnector->dc_link;
2677 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2678 	bool result = false;
2679 	enum dc_connection_type new_connection_type = dc_connection_none;
2680 	struct amdgpu_device *adev = drm_to_adev(dev);
2681 	union hpd_irq_data hpd_irq_data;
2682 
2683 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2684 
	/*
	 * TODO: Temporarily hold this mutex so the hpd interrupt does not run
	 * into a gpio conflict; once the i2c helper is implemented, this
	 * mutex should be retired.
	 */
2690 	if (dc_link->type != dc_connection_mst_branch)
2691 		mutex_lock(&aconnector->hpd_lock);
2692 
2693 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2694 
2695 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2696 		(dc_link->type == dc_connection_mst_branch)) {
2697 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2698 			result = true;
2699 			dm_handle_hpd_rx_irq(aconnector);
2700 			goto out;
2701 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2702 			result = false;
2703 			dm_handle_hpd_rx_irq(aconnector);
2704 			goto out;
2705 		}
2706 	}
2707 
2708 	mutex_lock(&adev->dm.dc_lock);
2709 #ifdef CONFIG_DRM_AMD_DC_HDCP
2710 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2711 #else
2712 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2713 #endif
2714 	mutex_unlock(&adev->dm.dc_lock);
2715 
2716 out:
2717 	if (result && !is_mst_root_connector) {
2718 		/* Downstream Port status changed. */
2719 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2720 			DRM_ERROR("KMS: Failed to detect connector\n");
2721 
2722 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2723 			emulated_link_detect(dc_link);
2724 
2725 			if (aconnector->fake_enable)
2726 				aconnector->fake_enable = false;
2727 
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
2732 			dm_restore_drm_connector_state(dev, connector);
2733 			drm_modeset_unlock_all(dev);
2734 
2735 			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
			if (aconnector->fake_enable)
2739 				aconnector->fake_enable = false;
2740 
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
2745 			dm_restore_drm_connector_state(dev, connector);
2746 			drm_modeset_unlock_all(dev);
2747 
2748 			drm_kms_helper_hotplug_event(dev);
2749 		}
2750 	}
2751 #ifdef CONFIG_DRM_AMD_DC_HDCP
2752 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2753 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2755 	}
2756 #endif
2757 
2758 	if (dc_link->type != dc_connection_mst_branch) {
2759 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2760 		mutex_unlock(&aconnector->hpd_lock);
2761 	}
2762 }
2763 
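/*
 * Register low-context HPD and HPD_RX (DP short pulse) interrupt handlers
 * for every connector whose link exposes a valid HPD interrupt source.
 */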
2764 static void register_hpd_handlers(struct amdgpu_device *adev)
2765 {
2766 	struct drm_device *dev = adev_to_drm(adev);
2767 	struct drm_connector *connector;
2768 	struct amdgpu_dm_connector *aconnector;
2769 	const struct dc_link *dc_link;
2770 	struct dc_interrupt_params int_params = {0};
2771 
2772 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2773 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2774 
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);
2779 		dc_link = aconnector->dc_link;
2780 
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2782 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2783 			int_params.irq_source = dc_link->irq_source_hpd;
2784 
2785 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2786 					handle_hpd_irq,
2787 					(void *) aconnector);
2788 		}
2789 
		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
2795 
2796 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2797 					handle_hpd_rx_irq,
2798 					(void *) aconnector);
2799 		}
2800 	}
2801 }
2802 
2803 #if defined(CONFIG_DRM_AMD_DC_SI)
2804 /* Register IRQ sources and initialize IRQ callbacks */
2805 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2806 {
2807 	struct dc *dc = adev->dm.dc;
2808 	struct common_irq_params *c_irq_params;
2809 	struct dc_interrupt_params int_params = {0};
2810 	int r;
2811 	int i;
2812 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2813 
2814 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2815 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2816 
2817 	/*
2818 	 * Actions of amdgpu_irq_add_id():
2819 	 * 1. Register a set() function with base driver.
2820 	 *    Base driver will call set() function to enable/disable an
2821 	 *    interrupt in DC hardware.
2822 	 * 2. Register amdgpu_dm_irq_handler().
2823 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2824 	 *    coming from DC hardware.
2825 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2827 
2828 	/* Use VBLANK interrupt */
2829 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2831 		if (r) {
2832 			DRM_ERROR("Failed to add crtc irq id!\n");
2833 			return r;
2834 		}
2835 
2836 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2837 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
2839 
2840 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2841 
2842 		c_irq_params->adev = adev;
2843 		c_irq_params->irq_src = int_params.irq_source;
2844 
2845 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2846 				dm_crtc_high_irq, c_irq_params);
2847 	}
2848 
2849 	/* Use GRPH_PFLIP interrupt */
2850 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2851 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2852 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2853 		if (r) {
2854 			DRM_ERROR("Failed to add page flip irq id!\n");
2855 			return r;
2856 		}
2857 
2858 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2859 		int_params.irq_source =
2860 			dc_interrupt_to_irq_source(dc, i, 0);
2861 
2862 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2863 
2864 		c_irq_params->adev = adev;
2865 		c_irq_params->irq_src = int_params.irq_source;
2866 
2867 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
2871 
2872 	/* HPD */
2873 	r = amdgpu_irq_add_id(adev, client_id,
2874 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2875 	if (r) {
2876 		DRM_ERROR("Failed to add hpd irq id!\n");
2877 		return r;
2878 	}
2879 
2880 	register_hpd_handlers(adev);
2881 
2882 	return 0;
2883 }
2884 #endif
2885 
2886 /* Register IRQ sources and initialize IRQ callbacks */
2887 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2888 {
2889 	struct dc *dc = adev->dm.dc;
2890 	struct common_irq_params *c_irq_params;
2891 	struct dc_interrupt_params int_params = {0};
2892 	int r;
2893 	int i;
2894 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2895 
2896 	if (adev->asic_type >= CHIP_VEGA10)
2897 		client_id = SOC15_IH_CLIENTID_DCE;
2898 
2899 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2900 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2901 
2902 	/*
2903 	 * Actions of amdgpu_irq_add_id():
2904 	 * 1. Register a set() function with base driver.
2905 	 *    Base driver will call set() function to enable/disable an
2906 	 *    interrupt in DC hardware.
2907 	 * 2. Register amdgpu_dm_irq_handler().
2908 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2909 	 *    coming from DC hardware.
2910 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2912 
2913 	/* Use VBLANK interrupt */
2914 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2915 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2916 		if (r) {
2917 			DRM_ERROR("Failed to add crtc irq id!\n");
2918 			return r;
2919 		}
2920 
2921 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2922 		int_params.irq_source =
2923 			dc_interrupt_to_irq_source(dc, i, 0);
2924 
2925 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2926 
2927 		c_irq_params->adev = adev;
2928 		c_irq_params->irq_src = int_params.irq_source;
2929 
2930 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2931 				dm_crtc_high_irq, c_irq_params);
2932 	}
2933 
2934 	/* Use VUPDATE interrupt */
2935 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2936 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2937 		if (r) {
2938 			DRM_ERROR("Failed to add vupdate irq id!\n");
2939 			return r;
2940 		}
2941 
2942 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2943 		int_params.irq_source =
2944 			dc_interrupt_to_irq_source(dc, i, 0);
2945 
2946 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2947 
2948 		c_irq_params->adev = adev;
2949 		c_irq_params->irq_src = int_params.irq_source;
2950 
2951 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2952 				dm_vupdate_high_irq, c_irq_params);
2953 	}
2954 
2955 	/* Use GRPH_PFLIP interrupt */
2956 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2957 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2958 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2959 		if (r) {
2960 			DRM_ERROR("Failed to add page flip irq id!\n");
2961 			return r;
2962 		}
2963 
2964 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2965 		int_params.irq_source =
2966 			dc_interrupt_to_irq_source(dc, i, 0);
2967 
2968 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2969 
2970 		c_irq_params->adev = adev;
2971 		c_irq_params->irq_src = int_params.irq_source;
2972 
2973 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
2977 
2978 	/* HPD */
2979 	r = amdgpu_irq_add_id(adev, client_id,
2980 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2981 	if (r) {
2982 		DRM_ERROR("Failed to add hpd irq id!\n");
2983 		return r;
2984 	}
2985 
2986 	register_hpd_handlers(adev);
2987 
2988 	return 0;
2989 }
2990 
2991 #if defined(CONFIG_DRM_AMD_DC_DCN)
2992 /* Register IRQ sources and initialize IRQ callbacks */
2993 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2994 {
2995 	struct dc *dc = adev->dm.dc;
2996 	struct common_irq_params *c_irq_params;
2997 	struct dc_interrupt_params int_params = {0};
2998 	int r;
2999 	int i;
3000 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3001 	static const unsigned int vrtl_int_srcid[] = {
3002 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3003 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3004 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3005 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3006 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3007 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3008 	};
3009 #endif
3010 
3011 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3012 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3013 
3014 	/*
3015 	 * Actions of amdgpu_irq_add_id():
3016 	 * 1. Register a set() function with base driver.
3017 	 *    Base driver will call set() function to enable/disable an
3018 	 *    interrupt in DC hardware.
3019 	 * 2. Register amdgpu_dm_irq_handler().
3020 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3021 	 *    coming from DC hardware.
3022 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3023 	 *    for acknowledging and handling.
3024 	 */
3025 
3026 	/* Use VSTARTUP interrupt */
3027 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3028 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3029 			i++) {
3030 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3031 
3032 		if (r) {
3033 			DRM_ERROR("Failed to add crtc irq id!\n");
3034 			return r;
3035 		}
3036 
3037 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3038 		int_params.irq_source =
3039 			dc_interrupt_to_irq_source(dc, i, 0);
3040 
3041 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3042 
3043 		c_irq_params->adev = adev;
3044 		c_irq_params->irq_src = int_params.irq_source;
3045 
3046 		amdgpu_dm_irq_register_interrupt(
3047 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3048 	}
3049 
3050 	/* Use otg vertical line interrupt */
3051 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3052 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3053 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3054 				vrtl_int_srcid[i], &adev->vline0_irq);
3055 
3056 		if (r) {
3057 			DRM_ERROR("Failed to add vline0 irq id!\n");
3058 			return r;
3059 		}
3060 
3061 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3062 		int_params.irq_source =
3063 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3064 
3065 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3066 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3067 			break;
3068 		}
3069 
3070 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3071 					- DC_IRQ_SOURCE_DC1_VLINE0];
3072 
3073 		c_irq_params->adev = adev;
3074 		c_irq_params->irq_src = int_params.irq_source;
3075 
3076 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3077 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3078 	}
3079 #endif
3080 
3081 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3082 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3083 	 * to trigger at end of each vblank, regardless of state of the lock,
3084 	 * matching DCE behaviour.
3085 	 */
3086 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3087 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3088 	     i++) {
3089 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3090 
3091 		if (r) {
3092 			DRM_ERROR("Failed to add vupdate irq id!\n");
3093 			return r;
3094 		}
3095 
3096 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3097 		int_params.irq_source =
3098 			dc_interrupt_to_irq_source(dc, i, 0);
3099 
3100 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3101 
3102 		c_irq_params->adev = adev;
3103 		c_irq_params->irq_src = int_params.irq_source;
3104 
3105 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3106 				dm_vupdate_high_irq, c_irq_params);
3107 	}
3108 
3109 	/* Use GRPH_PFLIP interrupt */
3110 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3111 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3112 			i++) {
3113 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3114 		if (r) {
3115 			DRM_ERROR("Failed to add page flip irq id!\n");
3116 			return r;
3117 		}
3118 
3119 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3120 		int_params.irq_source =
3121 			dc_interrupt_to_irq_source(dc, i, 0);
3122 
3123 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3124 
3125 		c_irq_params->adev = adev;
3126 		c_irq_params->irq_src = int_params.irq_source;
3127 
3128 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
3132 
3133 	if (dc->ctx->dmub_srv) {
3134 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3135 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3136 
3137 		if (r) {
3138 			DRM_ERROR("Failed to add dmub trace irq id!\n");
3139 			return r;
3140 		}
3141 
3142 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3143 		int_params.irq_source =
3144 			dc_interrupt_to_irq_source(dc, i, 0);
3145 
3146 		c_irq_params = &adev->dm.dmub_trace_params[0];
3147 
3148 		c_irq_params->adev = adev;
3149 		c_irq_params->irq_src = int_params.irq_source;
3150 
3151 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3152 				dm_dmub_trace_high_irq, c_irq_params);
3153 	}
3154 
3155 	/* HPD */
3156 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3157 			&adev->hpd_irq);
3158 	if (r) {
3159 		DRM_ERROR("Failed to add hpd irq id!\n");
3160 		return r;
3161 	}
3162 
3163 	register_hpd_handlers(adev);
3164 
3165 	return 0;
3166 }
3167 #endif
3168 
3169 /*
3170  * Acquires the lock for the atomic state object and returns
3171  * the new atomic state.
3172  *
3173  * This should only be called during atomic check.
3174  */
3175 static int dm_atomic_get_state(struct drm_atomic_state *state,
3176 			       struct dm_atomic_state **dm_state)
3177 {
3178 	struct drm_device *dev = state->dev;
3179 	struct amdgpu_device *adev = drm_to_adev(dev);
3180 	struct amdgpu_display_manager *dm = &adev->dm;
3181 	struct drm_private_state *priv_state;
3182 
3183 	if (*dm_state)
3184 		return 0;
3185 
3186 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3187 	if (IS_ERR(priv_state))
3188 		return PTR_ERR(priv_state);
3189 
3190 	*dm_state = to_dm_atomic_state(priv_state);
3191 
3192 	return 0;
3193 }
3194 
3195 static struct dm_atomic_state *
3196 dm_atomic_get_new_state(struct drm_atomic_state *state)
3197 {
3198 	struct drm_device *dev = state->dev;
3199 	struct amdgpu_device *adev = drm_to_adev(dev);
3200 	struct amdgpu_display_manager *dm = &adev->dm;
3201 	struct drm_private_obj *obj;
3202 	struct drm_private_state *new_obj_state;
3203 	int i;
3204 
3205 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3206 		if (obj->funcs == dm->atomic_obj.funcs)
3207 			return to_dm_atomic_state(new_obj_state);
3208 	}
3209 
3210 	return NULL;
3211 }
3212 
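/*
 * Duplicate the DM private atomic state, deep-copying the DC context so the
 * new state can be validated and committed independently of the old one.
 */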
3213 static struct drm_private_state *
3214 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3215 {
3216 	struct dm_atomic_state *old_state, *new_state;
3217 
3218 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3219 	if (!new_state)
3220 		return NULL;
3221 
3222 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3223 
3224 	old_state = to_dm_atomic_state(obj->state);
3225 
3226 	if (old_state && old_state->context)
3227 		new_state->context = dc_copy_state(old_state->context);
3228 
3229 	if (!new_state->context) {
3230 		kfree(new_state);
3231 		return NULL;
3232 	}
3233 
3234 	return &new_state->base;
3235 }
3236 
3237 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3238 				    struct drm_private_state *state)
3239 {
3240 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3241 
3242 	if (dm_state && dm_state->context)
3243 		dc_release_state(dm_state->context);
3244 
3245 	kfree(dm_state);
3246 }
3247 
3248 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3249 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3250 	.atomic_destroy_state = dm_atomic_destroy_state,
3251 };
3252 
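/*
 * Set up the DRM mode_config limits and helpers, wrap the initial DC state
 * in a DM private atomic object, and create the modeset properties and
 * audio component.
 */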
3253 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3254 {
3255 	struct dm_atomic_state *state;
3256 	int r;
3257 
3258 	adev->mode_info.mode_config_initialized = true;
3259 
3260 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3261 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3262 
3263 	adev_to_drm(adev)->mode_config.max_width = 16384;
3264 	adev_to_drm(adev)->mode_config.max_height = 16384;
3265 
3266 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3267 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3268 	/* indicates support for immediate flip */
3269 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3270 
3271 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3272 
3273 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3274 	if (!state)
3275 		return -ENOMEM;
3276 
3277 	state->context = dc_create_state(adev->dm.dc);
3278 	if (!state->context) {
3279 		kfree(state);
3280 		return -ENOMEM;
3281 	}
3282 
3283 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3284 
3285 	drm_atomic_private_obj_init(adev_to_drm(adev),
3286 				    &adev->dm.atomic_obj,
3287 				    &state->base,
3288 				    &dm_atomic_state_funcs);
3289 
3290 	r = amdgpu_display_modeset_create_props(adev);
3291 	if (r) {
3292 		dc_release_state(state->context);
3293 		kfree(state);
3294 		return r;
3295 	}
3296 
3297 	r = amdgpu_dm_audio_init(adev);
3298 	if (r) {
3299 		dc_release_state(state->context);
3300 		kfree(state);
3301 		return r;
3302 	}
3303 
3304 	return 0;
3305 }
3306 
3307 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3308 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3309 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3310 
3311 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3312 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3313 
3314 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3315 {
3316 #if defined(CONFIG_ACPI)
3317 	struct amdgpu_dm_backlight_caps caps;
3318 
3319 	memset(&caps, 0, sizeof(caps));
3320 
3321 	if (dm->backlight_caps.caps_valid)
3322 		return;
3323 
3324 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3325 	if (caps.caps_valid) {
3326 		dm->backlight_caps.caps_valid = true;
3327 		if (caps.aux_support)
3328 			return;
3329 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3330 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3331 	} else {
3332 		dm->backlight_caps.min_input_signal =
3333 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3334 		dm->backlight_caps.max_input_signal =
3335 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3336 	}
3337 #else
3338 	if (dm->backlight_caps.aux_support)
3339 		return;
3340 
3341 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3342 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3343 #endif
3344 }
3345 
3346 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3347 				unsigned *min, unsigned *max)
3348 {
3349 	if (!caps)
3350 		return 0;
3351 
3352 	if (caps->aux_support) {
3353 		// Firmware limits are in nits, DC API wants millinits.
3354 		*max = 1000 * caps->aux_max_input_signal;
3355 		*min = 1000 * caps->aux_min_input_signal;
3356 	} else {
3357 		// Firmware limits are 8-bit, PWM control is 16-bit.
3358 		*max = 0x101 * caps->max_input_signal;
3359 		*min = 0x101 * caps->min_input_signal;
3360 	}
3361 	return 1;
3362 }
3363 
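/*
 * Worked example for convert_brightness_from_user() with the default PWM
 * caps (min_input_signal = 12, max_input_signal = 255): min = 0x101 * 12 =
 * 3084, max = 0x101 * 255 = 65535, so a user brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34433.
 */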
3364 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3365 					uint32_t brightness)
3366 {
3367 	unsigned min, max;
3368 
3369 	if (!get_brightness_range(caps, &min, &max))
3370 		return brightness;
3371 
3372 	// Rescale 0..255 to min..max
3373 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3374 				       AMDGPU_MAX_BL_LEVEL);
3375 }
3376 
3377 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3378 				      uint32_t brightness)
3379 {
3380 	unsigned min, max;
3381 
3382 	if (!get_brightness_range(caps, &min, &max))
3383 		return brightness;
3384 
3385 	if (brightness < min)
3386 		return 0;
3387 	// Rescale min..max to 0..255
3388 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3389 				 max - min);
3390 }
3391 
3392 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3393 {
3394 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3395 	struct amdgpu_dm_backlight_caps caps;
3396 	struct dc_link *link = NULL;
3397 	u32 brightness;
3398 	bool rc;
3399 
3400 	amdgpu_dm_update_backlight_caps(dm);
3401 	caps = dm->backlight_caps;
3402 
3403 	link = (struct dc_link *)dm->backlight_link;
3404 
3405 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3406 	// Change brightness based on AUX property
3407 	if (caps.aux_support)
3408 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3409 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3410 	else
3411 		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3412 
3413 	return rc ? 0 : 1;
3414 }
3415 
3416 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3417 {
3418 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3419 	struct amdgpu_dm_backlight_caps caps;
3420 
3421 	amdgpu_dm_update_backlight_caps(dm);
3422 	caps = dm->backlight_caps;
3423 
3424 	if (caps.aux_support) {
3425 		struct dc_link *link = (struct dc_link *)dm->backlight_link;
3426 		u32 avg, peak;
3427 		bool rc;
3428 
3429 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3430 		if (!rc)
3431 			return bd->props.brightness;
3432 		return convert_brightness_to_user(&caps, avg);
3433 	} else {
3434 		int ret = dc_link_get_backlight_level(dm->backlight_link);
3435 
3436 		if (ret == DC_ERROR_UNEXPECTED)
3437 			return bd->props.brightness;
3438 		return convert_brightness_to_user(&caps, ret);
3439 	}
3440 }
3441 
3442 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3443 	.options = BL_CORE_SUSPENDRESUME,
3444 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3445 	.update_status	= amdgpu_dm_backlight_update_status,
3446 };
3447 
3448 static void
3449 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3450 {
3451 	char bl_name[16];
3452 	struct backlight_properties props = { 0 };
3453 
3454 	amdgpu_dm_update_backlight_caps(dm);
3455 
3456 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3457 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3458 	props.type = BACKLIGHT_RAW;
3459 
3460 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3461 		 adev_to_drm(dm->adev)->primary->index);
3462 
3463 	dm->backlight_dev = backlight_device_register(bl_name,
3464 						      adev_to_drm(dm->adev)->dev,
3465 						      dm,
3466 						      &amdgpu_dm_backlight_ops,
3467 						      &props);
3468 
3469 	if (IS_ERR(dm->backlight_dev))
3470 		DRM_ERROR("DM: Backlight registration failed!\n");
3471 	else
3472 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3473 }
3474 
3475 #endif
3476 
3477 static int initialize_plane(struct amdgpu_display_manager *dm,
3478 			    struct amdgpu_mode_info *mode_info, int plane_id,
3479 			    enum drm_plane_type plane_type,
3480 			    const struct dc_plane_cap *plane_cap)
3481 {
3482 	struct drm_plane *plane;
3483 	unsigned long possible_crtcs;
3484 	int ret = 0;
3485 
3486 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3487 	if (!plane) {
3488 		DRM_ERROR("KMS: Failed to allocate plane\n");
3489 		return -ENOMEM;
3490 	}
3491 	plane->type = plane_type;
3492 
3493 	/*
3494 	 * HACK: IGT tests expect that the primary plane for a CRTC
3495 	 * can only have one possible CRTC. Only expose support for
3496 	 * any CRTC on planes that are not going to be used as a
3497 	 * primary plane - such as overlay or underlay planes.
3498 	 */
3499 	possible_crtcs = 1 << plane_id;
3500 	if (plane_id >= dm->dc->caps.max_streams)
3501 		possible_crtcs = 0xff;
3502 
3503 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3504 
3505 	if (ret) {
3506 		DRM_ERROR("KMS: Failed to initialize plane\n");
3507 		kfree(plane);
3508 		return ret;
3509 	}
3510 
3511 	if (mode_info)
3512 		mode_info->planes[plane_id] = plane;
3513 
3514 	return ret;
3515 }
3516 
3517 
3518 static void register_backlight_device(struct amdgpu_display_manager *dm,
3519 				      struct dc_link *link)
3520 {
3521 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3522 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3523 
3524 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3525 	    link->type != dc_connection_none) {
3526 		/*
3527 		 * Even if registration failed, we should continue with
3528 		 * DM initialization because not having a backlight control
3529 		 * is better than a black screen.
3530 		 */
3531 		amdgpu_dm_register_backlight_device(dm);
3532 
3533 		if (dm->backlight_dev)
3534 			dm->backlight_link = link;
3535 	}
3536 #endif
3537 }
3538 
3539 
3540 /*
3541  * In this architecture, the association
3542  * connector -> encoder -> crtc
3543  * is not really required. The crtc and connector will hold the
3544  * display_index as an abstraction to use with the DAL component.
3545  *
3546  * Returns 0 on success
3547  */
3548 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3549 {
3550 	struct amdgpu_display_manager *dm = &adev->dm;
3551 	int32_t i;
3552 	struct amdgpu_dm_connector *aconnector = NULL;
3553 	struct amdgpu_encoder *aencoder = NULL;
3554 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3555 	uint32_t link_cnt;
3556 	int32_t primary_planes;
3557 	enum dc_connection_type new_connection_type = dc_connection_none;
3558 	const struct dc_plane_cap *plane;
3559 
3560 	dm->display_indexes_num = dm->dc->caps.max_streams;
3561 	/* Update the actually used number of CRTCs */
3562 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3563 
3564 	link_cnt = dm->dc->caps.max_links;
3565 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3566 		DRM_ERROR("DM: Failed to initialize mode config\n");
3567 		return -EINVAL;
3568 	}
3569 
3570 	/* There is one primary plane per CRTC */
3571 	primary_planes = dm->dc->caps.max_streams;
3572 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3573 
3574 	/*
3575 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
3576 	 * Order is reversed to match iteration order in atomic check.
3577 	 */
3578 	for (i = (primary_planes - 1); i >= 0; i--) {
3579 		plane = &dm->dc->caps.planes[i];
3580 
3581 		if (initialize_plane(dm, mode_info, i,
3582 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3583 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3584 			goto fail;
3585 		}
3586 	}
3587 
3588 	/*
3589 	 * Initialize overlay planes, index starting after primary planes.
3590 	 * These planes have a higher DRM index than the primary planes since
3591 	 * they should be considered as having a higher z-order.
3592 	 * Order is reversed to match iteration order in atomic check.
3593 	 *
3594 	 * Only support DCN for now, and only expose one so we don't encourage
3595 	 * userspace to use up all the pipes.
3596 	 */
3597 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3598 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3599 
3600 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3601 			continue;
3602 
3603 		if (!plane->blends_with_above || !plane->blends_with_below)
3604 			continue;
3605 
3606 		if (!plane->pixel_format_support.argb8888)
3607 			continue;
3608 
3609 		if (initialize_plane(dm, NULL, primary_planes + i,
3610 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3611 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3612 			goto fail;
3613 		}
3614 
3615 		/* Only create one overlay plane. */
3616 		break;
3617 	}
3618 
3619 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3620 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3621 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3622 			goto fail;
3623 		}
3624 
3625 	/* Loop over all connectors on the board. */
3626 	for (i = 0; i < link_cnt; i++) {
3627 		struct dc_link *link = NULL;
3628 
3629 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3630 			DRM_ERROR(
3631 				"KMS: Cannot support more than %d display indexes\n",
3632 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3633 			continue;
3634 		}
3635 
3636 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3637 		if (!aconnector)
3638 			goto fail;
3639 
3640 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3641 		if (!aencoder)
3642 			goto fail;
3643 
3644 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3645 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3646 			goto fail;
3647 		}
3648 
3649 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3650 			DRM_ERROR("KMS: Failed to initialize connector\n");
3651 			goto fail;
3652 		}
3653 
3654 		link = dc_get_link_at_index(dm->dc, i);
3655 
3656 		if (!dc_link_detect_sink(link, &new_connection_type))
3657 			DRM_ERROR("KMS: Failed to detect connector\n");
3658 
3659 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3660 			emulated_link_detect(link);
3661 			amdgpu_dm_update_connector_after_detect(aconnector);
3662 
3663 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3664 			amdgpu_dm_update_connector_after_detect(aconnector);
3665 			register_backlight_device(dm, link);
3666 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3667 				amdgpu_dm_set_psr_caps(link);
3668 		}
3669 
3670 
3671 	}
3672 
3673 	/* Software is initialized. Now we can register interrupt handlers. */
3674 	switch (adev->asic_type) {
3675 #if defined(CONFIG_DRM_AMD_DC_SI)
3676 	case CHIP_TAHITI:
3677 	case CHIP_PITCAIRN:
3678 	case CHIP_VERDE:
3679 	case CHIP_OLAND:
3680 		if (dce60_register_irq_handlers(dm->adev)) {
3681 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3682 			goto fail;
3683 		}
3684 		break;
3685 #endif
3686 	case CHIP_BONAIRE:
3687 	case CHIP_HAWAII:
3688 	case CHIP_KAVERI:
3689 	case CHIP_KABINI:
3690 	case CHIP_MULLINS:
3691 	case CHIP_TONGA:
3692 	case CHIP_FIJI:
3693 	case CHIP_CARRIZO:
3694 	case CHIP_STONEY:
3695 	case CHIP_POLARIS11:
3696 	case CHIP_POLARIS10:
3697 	case CHIP_POLARIS12:
3698 	case CHIP_VEGAM:
3699 	case CHIP_VEGA10:
3700 	case CHIP_VEGA12:
3701 	case CHIP_VEGA20:
3702 		if (dce110_register_irq_handlers(dm->adev)) {
3703 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3704 			goto fail;
3705 		}
3706 		break;
3707 #if defined(CONFIG_DRM_AMD_DC_DCN)
3708 	case CHIP_RAVEN:
3709 	case CHIP_NAVI12:
3710 	case CHIP_NAVI10:
3711 	case CHIP_NAVI14:
3712 	case CHIP_RENOIR:
3713 	case CHIP_SIENNA_CICHLID:
3714 	case CHIP_NAVY_FLOUNDER:
3715 	case CHIP_DIMGREY_CAVEFISH:
3716 	case CHIP_VANGOGH:
3717 		if (dcn10_register_irq_handlers(dm->adev)) {
3718 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3719 			goto fail;
3720 		}
3721 		break;
3722 #endif
3723 	default:
3724 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3725 		goto fail;
3726 	}
3727 
3728 	return 0;
3729 fail:
3730 	kfree(aencoder);
3731 	kfree(aconnector);
3732 
3733 	return -EINVAL;
3734 }
3735 
3736 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3737 {
3738 	drm_mode_config_cleanup(dm->ddev);
3739 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3740 	return;
3741 }
3742 
3743 /******************************************************************************
3744  * amdgpu_display_funcs functions
3745  *****************************************************************************/
3746 
3747 /*
3748  * dm_bandwidth_update - program display watermarks
3749  *
3750  * @adev: amdgpu_device pointer
3751  *
3752  * Calculate and program the display watermarks and line buffer allocation.
3753  */
3754 static void dm_bandwidth_update(struct amdgpu_device *adev)
3755 {
3756 	/* TODO: implement later */
3757 }
3758 
3759 static const struct amdgpu_display_funcs dm_display_funcs = {
3760 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3761 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3762 	.backlight_set_level = NULL, /* never called for DC */
3763 	.backlight_get_level = NULL, /* never called for DC */
3764 	.hpd_sense = NULL,/* called unconditionally */
3765 	.hpd_set_polarity = NULL, /* called unconditionally */
3766 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3767 	.page_flip_get_scanoutpos =
3768 		dm_crtc_get_scanoutpos,/* called unconditionally */
3769 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3770 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3771 };
3772 
3773 #if defined(CONFIG_DEBUG_KERNEL_DC)
3774 
3775 static ssize_t s3_debug_store(struct device *device,
3776 			      struct device_attribute *attr,
3777 			      const char *buf,
3778 			      size_t count)
3779 {
3780 	int ret;
3781 	int s3_state;
3782 	struct drm_device *drm_dev = dev_get_drvdata(device);
3783 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3784 
3785 	ret = kstrtoint(buf, 0, &s3_state);
3786 
3787 	if (ret == 0) {
3788 		if (s3_state) {
3789 			dm_resume(adev);
3790 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3791 		} else
3792 			dm_suspend(adev);
3793 	}
3794 
3795 	return ret == 0 ? count : 0;
3796 }
3797 
3798 DEVICE_ATTR_WO(s3_debug);
3799 
3800 #endif
3801 
3802 static int dm_early_init(void *handle)
3803 {
3804 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3805 
3806 	switch (adev->asic_type) {
3807 #if defined(CONFIG_DRM_AMD_DC_SI)
3808 	case CHIP_TAHITI:
3809 	case CHIP_PITCAIRN:
3810 	case CHIP_VERDE:
3811 		adev->mode_info.num_crtc = 6;
3812 		adev->mode_info.num_hpd = 6;
3813 		adev->mode_info.num_dig = 6;
3814 		break;
3815 	case CHIP_OLAND:
3816 		adev->mode_info.num_crtc = 2;
3817 		adev->mode_info.num_hpd = 2;
3818 		adev->mode_info.num_dig = 2;
3819 		break;
3820 #endif
3821 	case CHIP_BONAIRE:
3822 	case CHIP_HAWAII:
3823 		adev->mode_info.num_crtc = 6;
3824 		adev->mode_info.num_hpd = 6;
3825 		adev->mode_info.num_dig = 6;
3826 		break;
3827 	case CHIP_KAVERI:
3828 		adev->mode_info.num_crtc = 4;
3829 		adev->mode_info.num_hpd = 6;
3830 		adev->mode_info.num_dig = 7;
3831 		break;
3832 	case CHIP_KABINI:
3833 	case CHIP_MULLINS:
3834 		adev->mode_info.num_crtc = 2;
3835 		adev->mode_info.num_hpd = 6;
3836 		adev->mode_info.num_dig = 6;
3837 		break;
3838 	case CHIP_FIJI:
3839 	case CHIP_TONGA:
3840 		adev->mode_info.num_crtc = 6;
3841 		adev->mode_info.num_hpd = 6;
3842 		adev->mode_info.num_dig = 7;
3843 		break;
3844 	case CHIP_CARRIZO:
3845 		adev->mode_info.num_crtc = 3;
3846 		adev->mode_info.num_hpd = 6;
3847 		adev->mode_info.num_dig = 9;
3848 		break;
3849 	case CHIP_STONEY:
3850 		adev->mode_info.num_crtc = 2;
3851 		adev->mode_info.num_hpd = 6;
3852 		adev->mode_info.num_dig = 9;
3853 		break;
3854 	case CHIP_POLARIS11:
3855 	case CHIP_POLARIS12:
3856 		adev->mode_info.num_crtc = 5;
3857 		adev->mode_info.num_hpd = 5;
3858 		adev->mode_info.num_dig = 5;
3859 		break;
3860 	case CHIP_POLARIS10:
3861 	case CHIP_VEGAM:
3862 		adev->mode_info.num_crtc = 6;
3863 		adev->mode_info.num_hpd = 6;
3864 		adev->mode_info.num_dig = 6;
3865 		break;
3866 	case CHIP_VEGA10:
3867 	case CHIP_VEGA12:
3868 	case CHIP_VEGA20:
3869 		adev->mode_info.num_crtc = 6;
3870 		adev->mode_info.num_hpd = 6;
3871 		adev->mode_info.num_dig = 6;
3872 		break;
3873 #if defined(CONFIG_DRM_AMD_DC_DCN)
3874 	case CHIP_RAVEN:
3875 	case CHIP_RENOIR:
3876 	case CHIP_VANGOGH:
3877 		adev->mode_info.num_crtc = 4;
3878 		adev->mode_info.num_hpd = 4;
3879 		adev->mode_info.num_dig = 4;
3880 		break;
3881 	case CHIP_NAVI10:
3882 	case CHIP_NAVI12:
3883 	case CHIP_SIENNA_CICHLID:
3884 	case CHIP_NAVY_FLOUNDER:
3885 		adev->mode_info.num_crtc = 6;
3886 		adev->mode_info.num_hpd = 6;
3887 		adev->mode_info.num_dig = 6;
3888 		break;
3889 	case CHIP_NAVI14:
3890 	case CHIP_DIMGREY_CAVEFISH:
3891 		adev->mode_info.num_crtc = 5;
3892 		adev->mode_info.num_hpd = 5;
3893 		adev->mode_info.num_dig = 5;
3894 		break;
3895 #endif
3896 	default:
3897 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3898 		return -EINVAL;
3899 	}
3900 
3901 	amdgpu_dm_set_irq_funcs(adev);
3902 
3903 	if (adev->mode_info.funcs == NULL)
3904 		adev->mode_info.funcs = &dm_display_funcs;
3905 
3906 	/*
3907 	 * Note: Do NOT change adev->audio_endpt_rreg and
3908 	 * adev->audio_endpt_wreg because they are initialised in
3909 	 * amdgpu_device_init()
3910 	 */
3911 #if defined(CONFIG_DEBUG_KERNEL_DC)
3912 	device_create_file(
3913 		adev_to_drm(adev)->dev,
3914 		&dev_attr_s3_debug);
3915 #endif
3916 
3917 	return 0;
3918 }
3919 
3920 static bool modeset_required(struct drm_crtc_state *crtc_state,
3921 			     struct dc_stream_state *new_stream,
3922 			     struct dc_stream_state *old_stream)
3923 {
3924 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3925 }
3926 
3927 static bool modereset_required(struct drm_crtc_state *crtc_state)
3928 {
3929 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3930 }
3931 
3932 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3933 {
3934 	drm_encoder_cleanup(encoder);
3935 	kfree(encoder);
3936 }
3937 
3938 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3939 	.destroy = amdgpu_dm_encoder_destroy,
3940 };
3941 
3942 
3943 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3944 					 struct drm_framebuffer *fb,
3945 					 int *min_downscale, int *max_upscale)
3946 {
3947 	struct amdgpu_device *adev = drm_to_adev(dev);
3948 	struct dc *dc = adev->dm.dc;
3949 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3950 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3951 
3952 	switch (fb->format->format) {
3953 	case DRM_FORMAT_P010:
3954 	case DRM_FORMAT_NV12:
3955 	case DRM_FORMAT_NV21:
3956 		*max_upscale = plane_cap->max_upscale_factor.nv12;
3957 		*min_downscale = plane_cap->max_downscale_factor.nv12;
3958 		break;
3959 
3960 	case DRM_FORMAT_XRGB16161616F:
3961 	case DRM_FORMAT_ARGB16161616F:
3962 	case DRM_FORMAT_XBGR16161616F:
3963 	case DRM_FORMAT_ABGR16161616F:
3964 		*max_upscale = plane_cap->max_upscale_factor.fp16;
3965 		*min_downscale = plane_cap->max_downscale_factor.fp16;
3966 		break;
3967 
3968 	default:
3969 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
3970 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
3971 		break;
3972 	}
3973 
3974 	/*
3975 	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
3976 	 * scaling factor of 1.0 == 1000 units.
3977 	 */
3978 	if (*max_upscale == 1)
3979 		*max_upscale = 1000;
3980 
3981 	if (*min_downscale == 1)
3982 		*min_downscale = 1000;
3983 }
3984 
3985 
3986 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3987 				struct dc_scaling_info *scaling_info)
3988 {
3989 	int scale_w, scale_h, min_downscale, max_upscale;
3990 
3991 	memset(scaling_info, 0, sizeof(*scaling_info));
3992 
3993 	/* Source is in 16.16 fixed point; the fractional part is ignored for now. */
3994 	scaling_info->src_rect.x = state->src_x >> 16;
3995 	scaling_info->src_rect.y = state->src_y >> 16;
3996 
3997 	scaling_info->src_rect.width = state->src_w >> 16;
3998 	if (scaling_info->src_rect.width == 0)
3999 		return -EINVAL;
4000 
4001 	scaling_info->src_rect.height = state->src_h >> 16;
4002 	if (scaling_info->src_rect.height == 0)
4003 		return -EINVAL;
4004 
4005 	scaling_info->dst_rect.x = state->crtc_x;
4006 	scaling_info->dst_rect.y = state->crtc_y;
4007 
4008 	if (state->crtc_w == 0)
4009 		return -EINVAL;
4010 
4011 	scaling_info->dst_rect.width = state->crtc_w;
4012 
4013 	if (state->crtc_h == 0)
4014 		return -EINVAL;
4015 
4016 	scaling_info->dst_rect.height = state->crtc_h;
4017 
4018 	/* DRM doesn't specify clipping on destination output. */
4019 	scaling_info->clip_rect = scaling_info->dst_rect;
4020 
4021 	/* Validate scaling per-format with DC plane caps */
4022 	if (state->plane && state->plane->dev && state->fb) {
4023 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4024 					     &min_downscale, &max_upscale);
4025 	} else {
4026 		min_downscale = 250;
4027 		max_upscale = 16000;
4028 	}
4029 
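	/* Scaling factors are in 0.001 units: 1000 == 1.0x, so the fallback limits above span 0.25x to 16x. */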
4030 	scale_w = scaling_info->dst_rect.width * 1000 /
4031 		  scaling_info->src_rect.width;
4032 
4033 	if (scale_w < min_downscale || scale_w > max_upscale)
4034 		return -EINVAL;
4035 
4036 	scale_h = scaling_info->dst_rect.height * 1000 /
4037 		  scaling_info->src_rect.height;
4038 
4039 	if (scale_h < min_downscale || scale_h > max_upscale)
4040 		return -EINVAL;
4041 
4042 	/*
4043 	 * The "scaling_quality" can be ignored for now: quality = 0 makes DC
4044 	 * assume reasonable defaults based on the format.
4045 	 */
4046 
4047 	return 0;
4048 }
4049 
4050 static void
4051 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4052 				 uint64_t tiling_flags)
4053 {
4054 	/* Fill GFX8 params */
4055 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4056 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4057 
4058 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4059 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4060 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4061 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4062 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4063 
4064 		/* XXX fix me for VI */
4065 		tiling_info->gfx8.num_banks = num_banks;
4066 		tiling_info->gfx8.array_mode =
4067 				DC_ARRAY_2D_TILED_THIN1;
4068 		tiling_info->gfx8.tile_split = tile_split;
4069 		tiling_info->gfx8.bank_width = bankw;
4070 		tiling_info->gfx8.bank_height = bankh;
4071 		tiling_info->gfx8.tile_aspect = mtaspect;
4072 		tiling_info->gfx8.tile_mode =
4073 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4074 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4075 			== DC_ARRAY_1D_TILED_THIN1) {
4076 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4077 	}
4078 
4079 	tiling_info->gfx8.pipe_config =
4080 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4081 }
4082 
4083 static void
4084 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4085 				  union dc_tiling_info *tiling_info)
4086 {
4087 	tiling_info->gfx9.num_pipes =
4088 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4089 	tiling_info->gfx9.num_banks =
4090 		adev->gfx.config.gb_addr_config_fields.num_banks;
4091 	tiling_info->gfx9.pipe_interleave =
4092 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4093 	tiling_info->gfx9.num_shader_engines =
4094 		adev->gfx.config.gb_addr_config_fields.num_se;
4095 	tiling_info->gfx9.max_compressed_frags =
4096 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4097 	tiling_info->gfx9.num_rb_per_se =
4098 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4099 	tiling_info->gfx9.shaderEnable = 1;
4100 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4101 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4102 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4103 	    adev->asic_type == CHIP_VANGOGH)
4104 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4105 }
4106 
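/*
 * Ask DC whether the surface, as described by format, tiling and size, can
 * really be DCC-compressed for scanout; reject parameter combinations the
 * hardware cannot handle.
 */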
4107 static int
4108 validate_dcc(struct amdgpu_device *adev,
4109 	     const enum surface_pixel_format format,
4110 	     const enum dc_rotation_angle rotation,
4111 	     const union dc_tiling_info *tiling_info,
4112 	     const struct dc_plane_dcc_param *dcc,
4113 	     const struct dc_plane_address *address,
4114 	     const struct plane_size *plane_size)
4115 {
4116 	struct dc *dc = adev->dm.dc;
4117 	struct dc_dcc_surface_param input;
4118 	struct dc_surface_dcc_cap output;
4119 
4120 	memset(&input, 0, sizeof(input));
4121 	memset(&output, 0, sizeof(output));
4122 
4123 	if (!dcc->enable)
4124 		return 0;
4125 
4126 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4127 	    !dc->cap_funcs.get_dcc_compression_cap)
4128 		return -EINVAL;
4129 
4130 	input.format = format;
4131 	input.surface_size.width = plane_size->surface_size.width;
4132 	input.surface_size.height = plane_size->surface_size.height;
4133 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4134 
4135 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4136 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4137 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4138 		input.scan = SCAN_DIRECTION_VERTICAL;
4139 
4140 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4141 		return -EINVAL;
4142 
4143 	if (!output.capable)
4144 		return -EINVAL;
4145 
4146 	if (dcc->independent_64b_blks == 0 &&
4147 	    output.grph.rgb.independent_64b_blks != 0)
4148 		return -EINVAL;
4149 
4150 	return 0;
4151 }
4152 
4153 static bool
4154 modifier_has_dcc(uint64_t modifier)
4155 {
4156 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4157 }
4158 
4159 static unsigned
4160 modifier_gfx9_swizzle_mode(uint64_t modifier)
4161 {
4162 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4163 		return 0;
4164 
4165 	return AMD_FMT_MOD_GET(TILE, modifier);
4166 }
4167 
4168 static const struct drm_format_info *
4169 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4170 {
4171 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4172 }
4173 
4174 static void
4175 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4176 				    union dc_tiling_info *tiling_info,
4177 				    uint64_t modifier)
4178 {
4179 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4180 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4181 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4182 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
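	/* At most 4 pipe bits are used here; any remaining PIPE_XOR_BITS are re-expressed as shader engines below. */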
4183 
4184 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4185 
4186 	if (!IS_AMD_FMT_MOD(modifier))
4187 		return;
4188 
4189 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4190 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4191 
4192 	if (adev->family >= AMDGPU_FAMILY_NV) {
4193 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4194 	} else {
4195 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4196 
4197 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4198 	}
4199 }
4200 
4201 enum dm_micro_swizzle {
4202 	MICRO_SWIZZLE_Z = 0,
4203 	MICRO_SWIZZLE_S = 1,
4204 	MICRO_SWIZZLE_D = 2,
4205 	MICRO_SWIZZLE_R = 3
4206 };
4207 
4208 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4209 					  uint32_t format,
4210 					  uint64_t modifier)
4211 {
4212 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4213 	const struct drm_format_info *info = drm_format_info(format);
4214 
4215 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4216 
4217 	if (!info)
4218 		return false;
4219 
4220 	/*
4221 	 * We always have to allow this modifier, because core DRM still
4222 	 * checks LINEAR support if userspace does not provide modifiers.
4223 	 */
4224 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4225 		return true;
4226 
4227 	/*
4228 	 * The arbitrary tiling support for multiplane formats has not been hooked
4229 	 * up.
4230 	 */
4231 	if (info->num_planes > 1)
4232 		return false;
4233 
4234 	/*
4235 	 * For D swizzle the canonical modifier depends on the bpp, so check
4236 	 * it here.
4237 	 */
4238 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4239 	    adev->family >= AMDGPU_FAMILY_NV) {
4240 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4241 			return false;
4242 	}
4243 
4244 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4245 	    info->cpp[0] < 8)
4246 		return false;
4247 
4248 	if (modifier_has_dcc(modifier)) {
4249 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4250 		if (info->cpp[0] != 4)
4251 			return false;
4252 	}
4253 
4254 	return true;
4255 }
4256 
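/*
 * Append one modifier to the array, doubling its capacity when full. On
 * allocation failure the array is freed and *mods set to NULL, so callers
 * can detect -ENOMEM with a single check at the end.
 */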
4257 static void
4258 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4259 {
4260 	if (!*mods)
4261 		return;
4262 
4263 	if (*cap - *size < 1) {
4264 		uint64_t new_cap = *cap * 2;
4265 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4266 
4267 		if (!new_mods) {
4268 			kfree(*mods);
4269 			*mods = NULL;
4270 			return;
4271 		}
4272 
4273 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4274 		kfree(*mods);
4275 		*mods = new_mods;
4276 		*cap = new_cap;
4277 	}
4278 
4279 	(*mods)[*size] = mod;
4280 	*size += 1;
4281 }
4282 
4283 static void
4284 add_gfx9_modifiers(const struct amdgpu_device *adev,
4285 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4286 {
4287 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4288 	int pipe_xor_bits = min(8, pipes +
4289 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4290 	int bank_xor_bits = min(8 - pipe_xor_bits,
4291 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4292 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4293 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4294 
4295 
4296 	if (adev->family == AMDGPU_FAMILY_RV) {
4297 		/* Raven2 and later */
4298 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4299 
4300 		/*
4301 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4302 		 * doesn't support _D on DCN
4303 		 */
4304 
4305 		if (has_constant_encode) {
4306 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4307 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4308 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4309 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4310 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4311 				    AMD_FMT_MOD_SET(DCC, 1) |
4312 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4313 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4314 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4315 		}
4316 
4317 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4318 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4319 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4320 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4321 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4322 			    AMD_FMT_MOD_SET(DCC, 1) |
4323 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4324 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4325 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4326 
4327 		if (has_constant_encode) {
4328 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4329 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4330 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4331 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4332 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4333 				    AMD_FMT_MOD_SET(DCC, 1) |
4334 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4335 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4336 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4337 
4338 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4339 				    AMD_FMT_MOD_SET(RB, rb) |
4340 				    AMD_FMT_MOD_SET(PIPE, pipes));
4341 		}
4342 
4343 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4344 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4345 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4346 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4347 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4348 			    AMD_FMT_MOD_SET(DCC, 1) |
4349 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4350 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4351 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4352 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4353 			    AMD_FMT_MOD_SET(RB, rb) |
4354 			    AMD_FMT_MOD_SET(PIPE, pipes));
4355 	}
4356 
4357 	/*
4358 	 * Only supported for 64bpp on Raven, will be filtered on format in
4359 	 * dm_plane_format_mod_supported.
4360 	 */
4361 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4362 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4363 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4364 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4365 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4366 
4367 	if (adev->family == AMDGPU_FAMILY_RV) {
4368 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4369 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4370 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4371 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4372 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4373 	}
4374 
4375 	/*
4376 	 * Only supported for 64bpp on Raven, will be filtered on format in
4377 	 * dm_plane_format_mod_supported.
4378 	 */
4379 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4380 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4381 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4382 
4383 	if (adev->family == AMDGPU_FAMILY_RV) {
4384 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4385 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4386 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4387 	}
4388 }
4389 
4390 static void
4391 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4392 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4393 {
4394 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4395 
4396 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4397 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4398 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4399 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4400 		    AMD_FMT_MOD_SET(DCC, 1) |
4401 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4402 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4403 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4404 
4405 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4406 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4407 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4408 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4409 		    AMD_FMT_MOD_SET(DCC, 1) |
4410 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4411 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4412 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4413 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4414 
4415 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4416 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4417 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4418 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4419 
4420 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4421 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4422 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4423 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4424 
4425 
4426 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4427 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4428 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4429 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4430 
4431 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4432 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4433 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4434 }
4435 
4436 static void
4437 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4438 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4439 {
4440 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4441 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4442 
4443 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4444 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4445 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4446 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4447 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4448 		    AMD_FMT_MOD_SET(DCC, 1) |
4449 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4450 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4451 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4452 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4453 
4454 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4455 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4456 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4457 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4458 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4459 		    AMD_FMT_MOD_SET(DCC, 1) |
4460 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4461 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4462 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4463 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4464 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4465 
4466 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4467 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4468 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4469 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4470 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4471 
4472 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4473 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4474 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4475 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4476 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4477 
4478 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4479 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4480 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4481 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4482 
4483 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4484 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4485 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4486 }
4487 
4488 static int
4489 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4490 {
4491 	uint64_t size = 0, capacity = 128;
4492 	*mods = NULL;
4493 
4494 	/* We have not hooked up any pre-GFX9 modifiers. */
4495 	if (adev->family < AMDGPU_FAMILY_AI)
4496 		return 0;
4497 
4498 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
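	/* add_modifier() is a no-op when *mods is NULL, so a failed allocation here is caught by the !*mods checks below. */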
4499 
4500 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4501 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4502 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4503 		return *mods ? 0 : -ENOMEM;
4504 	}
4505 
4506 	switch (adev->family) {
4507 	case AMDGPU_FAMILY_AI:
4508 	case AMDGPU_FAMILY_RV:
4509 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4510 		break;
4511 	case AMDGPU_FAMILY_NV:
4512 	case AMDGPU_FAMILY_VGH:
4513 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4514 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4515 		else
4516 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4517 		break;
4518 	}
4519 
4520 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4521 
4522 	/* INVALID marks the end of the list. */
4523 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4524 
4525 	if (!*mods)
4526 		return -ENOMEM;
4527 
4528 	return 0;
4529 }
4530 
4531 static int
4532 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4533 					  const struct amdgpu_framebuffer *afb,
4534 					  const enum surface_pixel_format format,
4535 					  const enum dc_rotation_angle rotation,
4536 					  const struct plane_size *plane_size,
4537 					  union dc_tiling_info *tiling_info,
4538 					  struct dc_plane_dcc_param *dcc,
4539 					  struct dc_plane_address *address,
4540 					  const bool force_disable_dcc)
4541 {
4542 	const uint64_t modifier = afb->base.modifier;
4543 	int ret;
4544 
4545 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4546 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4547 
4548 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
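		/* The DCC metadata surface lives in plane 1 of the framebuffer. */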
4549 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4550 
4551 		dcc->enable = 1;
4552 		dcc->meta_pitch = afb->base.pitches[1];
4553 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4554 
4555 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4556 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4557 	}
4558 
4559 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4560 	if (ret)
4561 		return ret;
4562 
4563 	return 0;
4564 }
4565 
4566 static int
4567 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4568 			     const struct amdgpu_framebuffer *afb,
4569 			     const enum surface_pixel_format format,
4570 			     const enum dc_rotation_angle rotation,
4571 			     const uint64_t tiling_flags,
4572 			     union dc_tiling_info *tiling_info,
4573 			     struct plane_size *plane_size,
4574 			     struct dc_plane_dcc_param *dcc,
4575 			     struct dc_plane_address *address,
4576 			     bool tmz_surface,
4577 			     bool force_disable_dcc)
4578 {
4579 	const struct drm_framebuffer *fb = &afb->base;
4580 	int ret;
4581 
4582 	memset(tiling_info, 0, sizeof(*tiling_info));
4583 	memset(plane_size, 0, sizeof(*plane_size));
4584 	memset(dcc, 0, sizeof(*dcc));
4585 	memset(address, 0, sizeof(*address));
4586 
4587 	address->tmz_surface = tmz_surface;
4588 
4589 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4590 		uint64_t addr = afb->address + fb->offsets[0];
4591 
4592 		plane_size->surface_size.x = 0;
4593 		plane_size->surface_size.y = 0;
4594 		plane_size->surface_size.width = fb->width;
4595 		plane_size->surface_size.height = fb->height;
4596 		plane_size->surface_pitch =
4597 			fb->pitches[0] / fb->format->cpp[0];
4598 
4599 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4600 		address->grph.addr.low_part = lower_32_bits(addr);
4601 		address->grph.addr.high_part = upper_32_bits(addr);
4602 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4603 		uint64_t luma_addr = afb->address + fb->offsets[0];
4604 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4605 
4606 		plane_size->surface_size.x = 0;
4607 		plane_size->surface_size.y = 0;
4608 		plane_size->surface_size.width = fb->width;
4609 		plane_size->surface_size.height = fb->height;
4610 		plane_size->surface_pitch =
4611 			fb->pitches[0] / fb->format->cpp[0];
4612 
4613 		plane_size->chroma_size.x = 0;
4614 		plane_size->chroma_size.y = 0;
4615 		/* TODO: set these based on surface format */
4616 		plane_size->chroma_size.width = fb->width / 2;
4617 		plane_size->chroma_size.height = fb->height / 2;
4618 
4619 		plane_size->chroma_pitch =
4620 			fb->pitches[1] / fb->format->cpp[1];
4621 
4622 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4623 		address->video_progressive.luma_addr.low_part =
4624 			lower_32_bits(luma_addr);
4625 		address->video_progressive.luma_addr.high_part =
4626 			upper_32_bits(luma_addr);
4627 		address->video_progressive.chroma_addr.low_part =
4628 			lower_32_bits(chroma_addr);
4629 		address->video_progressive.chroma_addr.high_part =
4630 			upper_32_bits(chroma_addr);
4631 	}
4632 
4633 	if (adev->family >= AMDGPU_FAMILY_AI) {
4634 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4635 								rotation, plane_size,
4636 								tiling_info, dcc,
4637 								address,
4638 								force_disable_dcc);
4639 		if (ret)
4640 			return ret;
4641 	} else {
4642 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4643 	}
4644 
4645 	return 0;
4646 }
4647 
4648 static void
4649 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4650 			       bool *per_pixel_alpha, bool *global_alpha,
4651 			       int *global_alpha_value)
4652 {
4653 	*per_pixel_alpha = false;
4654 	*global_alpha = false;
4655 	*global_alpha_value = 0xff;
4656 
4657 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4658 		return;
4659 
4660 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4661 		static const uint32_t alpha_formats[] = {
4662 			DRM_FORMAT_ARGB8888,
4663 			DRM_FORMAT_RGBA8888,
4664 			DRM_FORMAT_ABGR8888,
4665 		};
4666 		uint32_t format = plane_state->fb->format->format;
4667 		unsigned int i;
4668 
4669 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4670 			if (format == alpha_formats[i]) {
4671 				*per_pixel_alpha = true;
4672 				break;
4673 			}
4674 		}
4675 	}
4676 
4677 	if (plane_state->alpha < 0xffff) {
4678 		*global_alpha = true;
4679 		*global_alpha_value = plane_state->alpha >> 8;
4680 	}
4681 }
4682 
4683 static int
4684 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4685 			    const enum surface_pixel_format format,
4686 			    enum dc_color_space *color_space)
4687 {
4688 	bool full_range;
4689 
4690 	*color_space = COLOR_SPACE_SRGB;
4691 
4692 	/* DRM color properties only affect non-RGB formats. */
4693 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4694 		return 0;
4695 
4696 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4697 
4698 	switch (plane_state->color_encoding) {
4699 	case DRM_COLOR_YCBCR_BT601:
4700 		if (full_range)
4701 			*color_space = COLOR_SPACE_YCBCR601;
4702 		else
4703 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4704 		break;
4705 
4706 	case DRM_COLOR_YCBCR_BT709:
4707 		if (full_range)
4708 			*color_space = COLOR_SPACE_YCBCR709;
4709 		else
4710 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4711 		break;
4712 
4713 	case DRM_COLOR_YCBCR_BT2020:
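		/* Only full-range BT.2020 maps to a DC color space here. */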
4714 		if (full_range)
4715 			*color_space = COLOR_SPACE_2020_YCBCR;
4716 		else
4717 			return -EINVAL;
4718 		break;
4719 
4720 	default:
4721 		return -EINVAL;
4722 	}
4723 
4724 	return 0;
4725 }
4726 
4727 static int
4728 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4729 			    const struct drm_plane_state *plane_state,
4730 			    const uint64_t tiling_flags,
4731 			    struct dc_plane_info *plane_info,
4732 			    struct dc_plane_address *address,
4733 			    bool tmz_surface,
4734 			    bool force_disable_dcc)
4735 {
4736 	const struct drm_framebuffer *fb = plane_state->fb;
4737 	const struct amdgpu_framebuffer *afb =
4738 		to_amdgpu_framebuffer(plane_state->fb);
4739 	int ret;
4740 
4741 	memset(plane_info, 0, sizeof(*plane_info));
4742 
4743 	switch (fb->format->format) {
4744 	case DRM_FORMAT_C8:
4745 		plane_info->format =
4746 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4747 		break;
4748 	case DRM_FORMAT_RGB565:
4749 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4750 		break;
4751 	case DRM_FORMAT_XRGB8888:
4752 	case DRM_FORMAT_ARGB8888:
4753 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4754 		break;
4755 	case DRM_FORMAT_XRGB2101010:
4756 	case DRM_FORMAT_ARGB2101010:
4757 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4758 		break;
4759 	case DRM_FORMAT_XBGR2101010:
4760 	case DRM_FORMAT_ABGR2101010:
4761 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4762 		break;
4763 	case DRM_FORMAT_XBGR8888:
4764 	case DRM_FORMAT_ABGR8888:
4765 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4766 		break;
4767 	case DRM_FORMAT_NV21:
4768 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4769 		break;
4770 	case DRM_FORMAT_NV12:
4771 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4772 		break;
4773 	case DRM_FORMAT_P010:
4774 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4775 		break;
4776 	case DRM_FORMAT_XRGB16161616F:
4777 	case DRM_FORMAT_ARGB16161616F:
4778 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4779 		break;
4780 	case DRM_FORMAT_XBGR16161616F:
4781 	case DRM_FORMAT_ABGR16161616F:
4782 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4783 		break;
4784 	default:
4785 		DRM_ERROR(
4786 			"Unsupported screen format %p4cc\n",
4787 			&fb->format->format);
4788 		return -EINVAL;
4789 	}
4790 
4791 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4792 	case DRM_MODE_ROTATE_0:
4793 		plane_info->rotation = ROTATION_ANGLE_0;
4794 		break;
4795 	case DRM_MODE_ROTATE_90:
4796 		plane_info->rotation = ROTATION_ANGLE_90;
4797 		break;
4798 	case DRM_MODE_ROTATE_180:
4799 		plane_info->rotation = ROTATION_ANGLE_180;
4800 		break;
4801 	case DRM_MODE_ROTATE_270:
4802 		plane_info->rotation = ROTATION_ANGLE_270;
4803 		break;
4804 	default:
4805 		plane_info->rotation = ROTATION_ANGLE_0;
4806 		break;
4807 	}
4808 
4809 	plane_info->visible = true;
4810 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4811 
4812 	plane_info->layer_index = 0;
4813 
4814 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4815 					  &plane_info->color_space);
4816 	if (ret)
4817 		return ret;
4818 
4819 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4820 					   plane_info->rotation, tiling_flags,
4821 					   &plane_info->tiling_info,
4822 					   &plane_info->plane_size,
4823 					   &plane_info->dcc, address, tmz_surface,
4824 					   force_disable_dcc);
4825 	if (ret)
4826 		return ret;
4827 
4828 	fill_blending_from_plane_state(
4829 		plane_state, &plane_info->per_pixel_alpha,
4830 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4831 
4832 	return 0;
4833 }
4834 
4835 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4836 				    struct dc_plane_state *dc_plane_state,
4837 				    struct drm_plane_state *plane_state,
4838 				    struct drm_crtc_state *crtc_state)
4839 {
4840 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4841 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4842 	struct dc_scaling_info scaling_info;
4843 	struct dc_plane_info plane_info;
4844 	int ret;
4845 	bool force_disable_dcc = false;
4846 
4847 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4848 	if (ret)
4849 		return ret;
4850 
4851 	dc_plane_state->src_rect = scaling_info.src_rect;
4852 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4853 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4854 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4855 
4856 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4857 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4858 					  afb->tiling_flags,
4859 					  &plane_info,
4860 					  &dc_plane_state->address,
4861 					  afb->tmz_surface,
4862 					  force_disable_dcc);
4863 	if (ret)
4864 		return ret;
4865 
4866 	dc_plane_state->format = plane_info.format;
4867 	dc_plane_state->color_space = plane_info.color_space;
4869 	dc_plane_state->plane_size = plane_info.plane_size;
4870 	dc_plane_state->rotation = plane_info.rotation;
4871 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4872 	dc_plane_state->stereo_format = plane_info.stereo_format;
4873 	dc_plane_state->tiling_info = plane_info.tiling_info;
4874 	dc_plane_state->visible = plane_info.visible;
4875 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4876 	dc_plane_state->global_alpha = plane_info.global_alpha;
4877 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4878 	dc_plane_state->dcc = plane_info.dcc;
4879 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4880 	dc_plane_state->flip_int_enabled = true;
4881 
4882 	/*
4883 	 * Always set input transfer function, since plane state is refreshed
4884 	 * every time.
4885 	 */
4886 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4887 	if (ret)
4888 		return ret;
4889 
4890 	return 0;
4891 }
4892 
4893 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4894 					   const struct dm_connector_state *dm_state,
4895 					   struct dc_stream_state *stream)
4896 {
4897 	enum amdgpu_rmx_type rmx_type;
4898 
4899 	struct rect src = { 0 }; /* viewport in composition space */
4900 	struct rect dst = { 0 }; /* stream addressable area */
4901 
4902 	/* No mode, nothing to be done. */
4903 	if (!mode)
4904 		return;
4905 
4906 	/* Full screen scaling by default */
4907 	src.width = mode->hdisplay;
4908 	src.height = mode->vdisplay;
4909 	dst.width = stream->timing.h_addressable;
4910 	dst.height = stream->timing.v_addressable;
4911 
4912 	if (dm_state) {
4913 		rmx_type = dm_state->scaling;
4914 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
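			/* Compare aspect ratios by cross-multiplying to stay in integer math. */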
4915 			if (src.width * dst.height <
4916 					src.height * dst.width) {
4917 				/* height needs less upscaling/more downscaling */
4918 				dst.width = src.width *
4919 						dst.height / src.height;
4920 			} else {
4921 				/* width needs less upscaling/more downscaling */
4922 				dst.height = src.height *
4923 						dst.width / src.width;
4924 			}
4925 		} else if (rmx_type == RMX_CENTER) {
4926 			dst = src;
4927 		}
4928 
4929 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4930 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4931 
4932 		if (dm_state->underscan_enable) {
4933 			dst.x += dm_state->underscan_hborder / 2;
4934 			dst.y += dm_state->underscan_vborder / 2;
4935 			dst.width -= dm_state->underscan_hborder;
4936 			dst.height -= dm_state->underscan_vborder;
4937 		}
4938 	}
4939 
4940 	stream->src = src;
4941 	stream->dst = dst;
4942 
4943 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4944 			dst.x, dst.y, dst.width, dst.height);
4945 
4946 }
4947 
4948 static enum dc_color_depth
4949 convert_color_depth_from_display_info(const struct drm_connector *connector,
4950 				      bool is_y420, int requested_bpc)
4951 {
4952 	uint8_t bpc;
4953 
4954 	if (is_y420) {
4955 		bpc = 8;
4956 
4957 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4958 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4959 			bpc = 16;
4960 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4961 			bpc = 12;
4962 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4963 			bpc = 10;
4964 	} else {
4965 		bpc = (uint8_t)connector->display_info.bpc;
4966 		/* Assume 8 bpc by default if no bpc is specified. */
4967 		bpc = bpc ? bpc : 8;
4968 	}
4969 
4970 	if (requested_bpc > 0) {
4971 		/*
4972 		 * Cap display bpc based on the user requested value.
4973 		 *
4974 		 * The value for state->max_bpc may not be correctly updated
4975 		 * depending on when the connector gets added to the state
4976 		 * or if this was called outside of atomic check, so it
4977 		 * can't be used directly.
4978 		 */
4979 		bpc = min_t(u8, bpc, requested_bpc);
4980 
4981 		/* Round down to the nearest even number. */
4982 		bpc = bpc - (bpc & 1);
4983 	}
4984 
4985 	switch (bpc) {
4986 	case 0:
4987 		/*
4988 		 * Temporary workaround: DRM doesn't parse color depth for
4989 		 * EDID revisions before 1.4.
4990 		 * TODO: Fix edid parsing
4991 		 */
4992 		return COLOR_DEPTH_888;
4993 	case 6:
4994 		return COLOR_DEPTH_666;
4995 	case 8:
4996 		return COLOR_DEPTH_888;
4997 	case 10:
4998 		return COLOR_DEPTH_101010;
4999 	case 12:
5000 		return COLOR_DEPTH_121212;
5001 	case 14:
5002 		return COLOR_DEPTH_141414;
5003 	case 16:
5004 		return COLOR_DEPTH_161616;
5005 	default:
5006 		return COLOR_DEPTH_UNDEFINED;
5007 	}
5008 }
5009 
5010 static enum dc_aspect_ratio
5011 get_aspect_ratio(const struct drm_display_mode *mode_in)
5012 {
5013 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5014 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5015 }
5016 
5017 static enum dc_color_space
5018 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5019 {
5020 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5021 
5022 	switch (dc_crtc_timing->pixel_encoding)	{
5023 	case PIXEL_ENCODING_YCBCR422:
5024 	case PIXEL_ENCODING_YCBCR444:
5025 	case PIXEL_ENCODING_YCBCR420:
5026 	{
5027 		/*
5028 		 * 27.03 MHz (270300 in 100 Hz units) is the separation point
5029 		 * between HDTV and SDTV according to the HDMI spec; use
5030 		 * YCbCr709 above it and YCbCr601 below it.
5031 		 */
5032 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5033 			if (dc_crtc_timing->flags.Y_ONLY)
5034 				color_space =
5035 					COLOR_SPACE_YCBCR709_LIMITED;
5036 			else
5037 				color_space = COLOR_SPACE_YCBCR709;
5038 		} else {
5039 			if (dc_crtc_timing->flags.Y_ONLY)
5040 				color_space =
5041 					COLOR_SPACE_YCBCR601_LIMITED;
5042 			else
5043 				color_space = COLOR_SPACE_YCBCR601;
5044 		}
5045 
5046 	}
5047 	break;
5048 	case PIXEL_ENCODING_RGB:
5049 		color_space = COLOR_SPACE_SRGB;
5050 		break;
5051 
5052 	default:
5053 		WARN_ON(1);
5054 		break;
5055 	}
5056 
5057 	return color_space;
5058 }
5059 
5060 static bool adjust_colour_depth_from_display_info(
5061 	struct dc_crtc_timing *timing_out,
5062 	const struct drm_display_info *info)
5063 {
5064 	enum dc_color_depth depth = timing_out->display_color_depth;
5065 	int normalized_clk;
5066 	do {
5067 		normalized_clk = timing_out->pix_clk_100hz / 10;
5068 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5069 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5070 			normalized_clk /= 2;
5071 		/* Adjust the pixel clock per the HDMI spec based on the colour depth. */
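		/* Deep colour scales the TMDS clock with bits per pixel, e.g. 12 bpc runs at 36/24 = 1.5x the 8 bpc rate. */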
5072 		switch (depth) {
5073 		case COLOR_DEPTH_888:
5074 			break;
5075 		case COLOR_DEPTH_101010:
5076 			normalized_clk = (normalized_clk * 30) / 24;
5077 			break;
5078 		case COLOR_DEPTH_121212:
5079 			normalized_clk = (normalized_clk * 36) / 24;
5080 			break;
5081 		case COLOR_DEPTH_161616:
5082 			normalized_clk = (normalized_clk * 48) / 24;
5083 			break;
5084 		default:
5085 			/* The above depths are the only ones valid for HDMI. */
5086 			return false;
5087 		}
5088 		if (normalized_clk <= info->max_tmds_clock) {
5089 			timing_out->display_color_depth = depth;
5090 			return true;
5091 		}
5092 	} while (--depth > COLOR_DEPTH_666);
5093 	return false;
5094 }
5095 
5096 static void fill_stream_properties_from_drm_display_mode(
5097 	struct dc_stream_state *stream,
5098 	const struct drm_display_mode *mode_in,
5099 	const struct drm_connector *connector,
5100 	const struct drm_connector_state *connector_state,
5101 	const struct dc_stream_state *old_stream,
5102 	int requested_bpc)
5103 {
5104 	struct dc_crtc_timing *timing_out = &stream->timing;
5105 	const struct drm_display_info *info = &connector->display_info;
5106 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5107 	struct hdmi_vendor_infoframe hv_frame;
5108 	struct hdmi_avi_infoframe avi_frame;
5109 
5110 	memset(&hv_frame, 0, sizeof(hv_frame));
5111 	memset(&avi_frame, 0, sizeof(avi_frame));
5112 
5113 	timing_out->h_border_left = 0;
5114 	timing_out->h_border_right = 0;
5115 	timing_out->v_border_top = 0;
5116 	timing_out->v_border_bottom = 0;
5117 	/* TODO: un-hardcode */
5118 	if (drm_mode_is_420_only(info, mode_in)
5119 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5120 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5121 	else if (drm_mode_is_420_also(info, mode_in)
5122 			&& aconnector->force_yuv420_output)
5123 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5124 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5125 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5126 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5127 	else
5128 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5129 
5130 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5131 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5132 		connector,
5133 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5134 		requested_bpc);
5135 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5136 	timing_out->hdmi_vic = 0;
5137 
5138 	if (old_stream) {
5139 		timing_out->vic = old_stream->timing.vic;
5140 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5141 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5142 	} else {
5143 		timing_out->vic = drm_match_cea_mode(mode_in);
5144 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5145 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5146 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5147 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5148 	}
5149 
5150 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5151 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5152 		timing_out->vic = avi_frame.video_code;
5153 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5154 		timing_out->hdmi_vic = hv_frame.vic;
5155 	}
5156 
5157 	timing_out->h_addressable = mode_in->hdisplay;
5158 	timing_out->h_total = mode_in->htotal;
5159 	timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5160 	timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5161 	timing_out->v_total = mode_in->vtotal;
5162 	timing_out->v_addressable = mode_in->vdisplay;
5163 	timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5164 	timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5165 	timing_out->pix_clk_100hz = mode_in->clock * 10;
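
	/*
	 * Illustration (CEA 1080p60 values assumed): hdisplay=1920,
	 * hsync_start=2008, hsync_end=2052, htotal=2200, so the derivation
	 * above gives h_front_porch = 2008 - 1920 = 88 and h_sync_width =
	 * 2052 - 2008 = 44; clock = 148500 kHz becomes pix_clk_100hz = 1485000.
	 */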
5166 
5167 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5168 
5169 	stream->output_color_space = get_output_color_space(timing_out);
5170 
5171 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5172 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5173 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5174 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5175 		    drm_mode_is_420_also(info, mode_in) &&
5176 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5177 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5178 			adjust_colour_depth_from_display_info(timing_out, info);
5179 		}
5180 	}
5181 }
5182 
5183 static void fill_audio_info(struct audio_info *audio_info,
5184 			    const struct drm_connector *drm_connector,
5185 			    const struct dc_sink *dc_sink)
5186 {
5187 	int i = 0;
5188 	int cea_revision = 0;
5189 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5190 
5191 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5192 	audio_info->product_id = edid_caps->product_id;
5193 
5194 	cea_revision = drm_connector->display_info.cea_rev;
5195 
5196 	strscpy(audio_info->display_name,
5197 		edid_caps->display_name,
5198 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5199 
5200 	if (cea_revision >= 3) {
5201 		audio_info->mode_count = edid_caps->audio_mode_count;
5202 
5203 		for (i = 0; i < audio_info->mode_count; ++i) {
5204 			audio_info->modes[i].format_code =
5205 					(enum audio_format_code)
5206 					(edid_caps->audio_modes[i].format_code);
5207 			audio_info->modes[i].channel_count =
5208 					edid_caps->audio_modes[i].channel_count;
5209 			audio_info->modes[i].sample_rates.all =
5210 					edid_caps->audio_modes[i].sample_rate;
5211 			audio_info->modes[i].sample_size =
5212 					edid_caps->audio_modes[i].sample_size;
5213 		}
5214 	}
5215 
5216 	audio_info->flags.all = edid_caps->speaker_flags;
5217 
5218 	/* TODO: We only check the progressive mode; check the interlaced mode too */
5219 	if (drm_connector->latency_present[0]) {
5220 		audio_info->video_latency = drm_connector->video_latency[0];
5221 		audio_info->audio_latency = drm_connector->audio_latency[0];
5222 	}
5223 
5224 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5225 
5226 }
5227 
5228 static void
5229 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5230 				      struct drm_display_mode *dst_mode)
5231 {
5232 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5233 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5234 	dst_mode->crtc_clock = src_mode->crtc_clock;
5235 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5236 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5237 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5238 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5239 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5240 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5241 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5242 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5243 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5244 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5245 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5246 }
5247 
5248 static void
5249 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5250 					const struct drm_display_mode *native_mode,
5251 					bool scale_enabled)
5252 {
5253 	if (scale_enabled) {
5254 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5255 	} else if (native_mode->clock == drm_mode->clock &&
5256 			native_mode->htotal == drm_mode->htotal &&
5257 			native_mode->vtotal == drm_mode->vtotal) {
5258 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5259 	} else {
5260 		/* Neither scaling nor an amdgpu-inserted mode: no need to patch */
5261 	}
5262 }
5263 
5264 static struct dc_sink *
5265 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5266 {
5267 	struct dc_sink_init_data sink_init_data = { 0 };
5268 	struct dc_sink *sink = NULL;
5269 	sink_init_data.link = aconnector->dc_link;
5270 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5271 
5272 	sink = dc_sink_create(&sink_init_data);
5273 	if (!sink) {
5274 		DRM_ERROR("Failed to create sink!\n");
5275 		return NULL;
5276 	}
5277 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5278 
5279 	return sink;
5280 }
5281 
5282 static void set_multisync_trigger_params(
5283 		struct dc_stream_state *stream)
5284 {
5285 	if (stream->triggered_crtc_reset.enabled) {
5286 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5287 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5288 	}
5289 }
5290 
5291 static void set_master_stream(struct dc_stream_state *stream_set[],
5292 			      int stream_count)
5293 {
5294 	int j, highest_rfr = 0, master_stream = 0;
5295 
5296 	for (j = 0;  j < stream_count; j++) {
5297 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5298 			int refresh_rate = 0;
5299 
5300 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5301 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5302 			if (refresh_rate > highest_rfr) {
5303 				highest_rfr = refresh_rate;
5304 				master_stream = j;
5305 			}
5306 		}
5307 	}
5308 	for (j = 0;  j < stream_count; j++) {
5309 		if (stream_set[j])
5310 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5311 	}
5312 }
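
/*
 * The refresh rate above is pixel clock divided by total pixels per frame;
 * for a 1080p60 stream (values assumed for illustration) that is
 * (1485000 * 100) / (2200 * 1125) = 60 Hz. The stream with the highest
 * rate becomes the event source the other synchronized CRTCs reset against.
 */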
5313 
5314 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5315 {
5316 	int i = 0;
5317 
5318 	if (context->stream_count < 2)
5319 		return;
5320 	for (i = 0; i < context->stream_count ; i++) {
5321 		if (!context->streams[i])
5322 			continue;
5323 		/*
5324 		 * TODO: add a function to read AMD VSDB bits and set
5325 		 * crtc_sync_master.multi_sync_enabled flag
5326 		 * For now it's set to false
5327 		 */
5328 		set_multisync_trigger_params(context->streams[i]);
5329 	}
5330 	set_master_stream(context->streams, context->stream_count);
5331 }
5332 
5333 static struct drm_display_mode *
5334 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5335 			  bool use_probed_modes)
5336 {
5337 	struct drm_display_mode *m, *m_pref = NULL;
5338 	u16 current_refresh, highest_refresh;
5339 	struct list_head *list_head = use_probed_modes ?
5340 						    &aconnector->base.probed_modes :
5341 						    &aconnector->base.modes;
5342 
5343 	if (aconnector->freesync_vid_base.clock != 0)
5344 		return &aconnector->freesync_vid_base;
5345 
5346 	/* Find the preferred mode */
5347 	list_for_each_entry(m, list_head, head) {
5348 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5349 			m_pref = m;
5350 			break;
5351 		}
5352 	}
5353 
5354 	if (!m_pref) {
5355 		/* Probably an EDID with no preferred mode. Fall back to the first entry */
5356 		m_pref = list_first_entry_or_null(
5357 			&aconnector->base.modes, struct drm_display_mode, head);
5358 		if (!m_pref) {
5359 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5360 			return NULL;
5361 		}
5362 	}
5363 
5364 	highest_refresh = drm_mode_vrefresh(m_pref);
5365 
5366 	/*
5367 	 * Find the mode with highest refresh rate with same resolution.
5368 	 * For some monitors, preferred mode is not the mode with highest
5369 	 * supported refresh rate.
5370 	 */
5371 	list_for_each_entry(m, list_head, head) {
5372 		current_refresh  = drm_mode_vrefresh(m);
5373 
5374 		if (m->hdisplay == m_pref->hdisplay &&
5375 		    m->vdisplay == m_pref->vdisplay &&
5376 		    highest_refresh < current_refresh) {
5377 			highest_refresh = current_refresh;
5378 			m_pref = m;
5379 		}
5380 	}
5381 
5382 	aconnector->freesync_vid_base = *m_pref;
5383 	return m_pref;
5384 }
5385 
5386 static bool is_freesync_video_mode(struct drm_display_mode *mode,
5387 				   struct amdgpu_dm_connector *aconnector)
5388 {
5389 	struct drm_display_mode *high_mode;
5390 	int timing_diff;
5391 
5392 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5393 	if (!high_mode || !mode)
5394 		return false;
5395 
5396 	timing_diff = high_mode->vtotal - mode->vtotal;
5397 
5398 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5399 	    high_mode->hdisplay != mode->hdisplay ||
5400 	    high_mode->vdisplay != mode->vdisplay ||
5401 	    high_mode->hsync_start != mode->hsync_start ||
5402 	    high_mode->hsync_end != mode->hsync_end ||
5403 	    high_mode->htotal != mode->htotal ||
5404 	    high_mode->hskew != mode->hskew ||
5405 	    high_mode->vscan != mode->vscan ||
5406 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5407 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5408 		return false;
5409 	else
5410 		return true;
5411 }
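
/*
 * Put differently: a mode qualifies as a freesync video mode only if it
 * matches the highest-refresh base mode except for a stretched vertical
 * blank, i.e. both vsync edges shift by exactly the vtotal delta. For
 * illustration (values assumed), a 48 Hz variant of a 1080p60 base mode
 * keeps clock, hdisplay and htotal, and only grows vtotal while moving
 * vsync_start/vsync_end by the same amount.
 */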
5412 
5413 static struct dc_stream_state *
5414 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5415 		       const struct drm_display_mode *drm_mode,
5416 		       const struct dm_connector_state *dm_state,
5417 		       const struct dc_stream_state *old_stream,
5418 		       int requested_bpc)
5419 {
5420 	struct drm_display_mode *preferred_mode = NULL;
5421 	struct drm_connector *drm_connector;
5422 	const struct drm_connector_state *con_state =
5423 		dm_state ? &dm_state->base : NULL;
5424 	struct dc_stream_state *stream = NULL;
5425 	struct drm_display_mode mode = *drm_mode;
5426 	struct drm_display_mode saved_mode;
5427 	struct drm_display_mode *freesync_mode = NULL;
5428 	bool native_mode_found = false;
5429 	bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5430 	int mode_refresh;
5431 	int preferred_refresh = 0;
5432 #if defined(CONFIG_DRM_AMD_DC_DCN)
5433 	struct dsc_dec_dpcd_caps dsc_caps;
5434 	uint32_t link_bandwidth_kbps;
5435 #endif
5436 	struct dc_sink *sink = NULL;
5437 
5438 	memset(&saved_mode, 0, sizeof(saved_mode));
5439 
5440 	if (aconnector == NULL) {
5441 		DRM_ERROR("aconnector is NULL!\n");
5442 		return stream;
5443 	}
5444 
5445 	drm_connector = &aconnector->base;
5446 
5447 	if (!aconnector->dc_sink) {
5448 		sink = create_fake_sink(aconnector);
5449 		if (!sink)
5450 			return stream;
5451 	} else {
5452 		sink = aconnector->dc_sink;
5453 		dc_sink_retain(sink);
5454 	}
5455 
5456 	stream = dc_create_stream_for_sink(sink);
5457 
5458 	if (stream == NULL) {
5459 		DRM_ERROR("Failed to create stream for sink!\n");
5460 		goto finish;
5461 	}
5462 
5463 	stream->dm_stream_context = aconnector;
5464 
5465 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5466 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5467 
5468 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5469 		/* Search for preferred mode */
5470 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5471 			native_mode_found = true;
5472 			break;
5473 		}
5474 	}
5475 	if (!native_mode_found)
5476 		preferred_mode = list_first_entry_or_null(
5477 				&aconnector->base.modes,
5478 				struct drm_display_mode,
5479 				head);
5480 
5481 	mode_refresh = drm_mode_vrefresh(&mode);
5482 
5483 	if (preferred_mode == NULL) {
5484 		/*
5485 		 * This may not be an error, the use case is when we have no
5486 		 * This may not be an error: the use case is a hotplug with no
5487 		 * usermode call to reset and set the mode. In that case we set
5488 		 * the mode ourselves to restore the previous mode, and the
5489 		 * mode list may not be filled in yet.
5490 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5491 	} else {
5492 		recalculate_timing |= amdgpu_freesync_vid_mode &&
5493 				 is_freesync_video_mode(&mode, aconnector);
5494 		if (recalculate_timing) {
5495 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5496 			saved_mode = mode;
5497 			mode = *freesync_mode;
5498 		} else {
5499 			decide_crtc_timing_for_drm_display_mode(
5500 				&mode, preferred_mode,
5501 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5502 		}
5503 
5504 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5505 	}
5506 
5507 	if (recalculate_timing)
5508 		drm_mode_set_crtcinfo(&saved_mode, 0);
5509 	else
5510 		drm_mode_set_crtcinfo(&mode, 0);
5511 
5512 	/*
5513 	 * If scaling is enabled and the refresh rate didn't change,
5514 	 * we copy the VIC and polarities of the old timings.
5515 	 */
5516 	if (!recalculate_timing || mode_refresh != preferred_refresh)
5517 		fill_stream_properties_from_drm_display_mode(
5518 			stream, &mode, &aconnector->base, con_state, NULL,
5519 			requested_bpc);
5520 	else
5521 		fill_stream_properties_from_drm_display_mode(
5522 			stream, &mode, &aconnector->base, con_state, old_stream,
5523 			requested_bpc);
5524 
5525 	stream->timing.flags.DSC = 0;
5526 
5527 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5528 #if defined(CONFIG_DRM_AMD_DC_DCN)
5529 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5530 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5531 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5532 				      &dsc_caps);
5533 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5534 							     dc_link_get_link_cap(aconnector->dc_link));
5535 
5536 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5537 			/* Set DSC policy according to dsc_clock_en */
5538 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5539 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5540 
5541 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5542 						  &dsc_caps,
5543 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5544 						  0,
5545 						  link_bandwidth_kbps,
5546 						  &stream->timing,
5547 						  &stream->timing.dsc_cfg))
5548 				stream->timing.flags.DSC = 1;
5549 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5550 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5551 				stream->timing.flags.DSC = 1;
5552 
5553 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5554 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5555 
5556 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5557 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5558 
5559 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5560 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5561 		}
5562 #endif
5563 	}
5564 
5565 	update_stream_scaling_settings(&mode, dm_state, stream);
5566 
5567 	fill_audio_info(
5568 		&stream->audio_info,
5569 		drm_connector,
5570 		sink);
5571 
5572 	update_stream_signal(stream, sink);
5573 
5574 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5575 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5576 
5577 	if (stream->link->psr_settings.psr_feature_enabled) {
5578 		/*
5579 		 * Decide whether the stream supports VSC SDP colorimetry
5580 		 * before building the VSC info packet.
5581 		 */
5582 		stream->use_vsc_sdp_for_colorimetry = false;
5583 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5584 			stream->use_vsc_sdp_for_colorimetry =
5585 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5586 		} else {
5587 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5588 				stream->use_vsc_sdp_for_colorimetry = true;
5589 		}
5590 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5591 	}
5592 finish:
5593 	dc_sink_release(sink);
5594 
5595 	return stream;
5596 }
5597 
5598 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5599 {
5600 	drm_crtc_cleanup(crtc);
5601 	kfree(crtc);
5602 }
5603 
5604 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5605 				  struct drm_crtc_state *state)
5606 {
5607 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5608 
5609 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
5610 	if (cur->stream)
5611 		dc_stream_release(cur->stream);
5612 
5613 
5614 	__drm_atomic_helper_crtc_destroy_state(state);
5615 
5616 
5617 	kfree(state);
5618 }
5619 
5620 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5621 {
5622 	struct dm_crtc_state *state;
5623 
5624 	if (crtc->state)
5625 		dm_crtc_destroy_state(crtc, crtc->state);
5626 
5627 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5628 	if (WARN_ON(!state))
5629 		return;
5630 
5631 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5632 }
5633 
5634 static struct drm_crtc_state *
5635 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5636 {
5637 	struct dm_crtc_state *state, *cur;
5638 
5639 	if (WARN_ON(!crtc->state))
5640 		return NULL;
5641 
5642 	cur = to_dm_crtc_state(crtc->state);
5643 
5644 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5645 	if (!state)
5646 		return NULL;
5647 
5648 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5649 
5650 	if (cur->stream) {
5651 		state->stream = cur->stream;
5652 		dc_stream_retain(state->stream);
5653 	}
5654 
5655 	state->active_planes = cur->active_planes;
5656 	state->vrr_infopacket = cur->vrr_infopacket;
5657 	state->abm_level = cur->abm_level;
5658 	state->vrr_supported = cur->vrr_supported;
5659 	state->freesync_config = cur->freesync_config;
5660 	state->cm_has_degamma = cur->cm_has_degamma;
5661 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5662 	/* TODO: Duplicate dc_stream once the stream object is flattened */
5663 
5664 	return &state->base;
5665 }
5666 
5667 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5668 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5669 {
5670 	crtc_debugfs_init(crtc);
5671 
5672 	return 0;
5673 }
5674 #endif
5675 
5676 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5677 {
5678 	enum dc_irq_source irq_source;
5679 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5680 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5681 	int rc;
5682 
5683 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5684 
5685 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5686 
5687 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5688 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
5689 	return rc;
5690 }
5691 
5692 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5693 {
5694 	enum dc_irq_source irq_source;
5695 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5696 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5697 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5698 #if defined(CONFIG_DRM_AMD_DC_DCN)
5699 	struct amdgpu_display_manager *dm = &adev->dm;
5700 	unsigned long flags;
5701 #endif
5702 	int rc = 0;
5703 
5704 	if (enable) {
5705 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5706 		if (amdgpu_dm_vrr_active(acrtc_state))
5707 			rc = dm_set_vupdate_irq(crtc, true);
5708 	} else {
5709 		/* vblank irq off -> vupdate irq off */
5710 		rc = dm_set_vupdate_irq(crtc, false);
5711 	}
5712 
5713 	if (rc)
5714 		return rc;
5715 
5716 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5717 
5718 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5719 		return -EBUSY;
5720 
5721 	if (amdgpu_in_reset(adev))
5722 		return 0;
5723 
5724 #if defined(CONFIG_DRM_AMD_DC_DCN)
5725 	spin_lock_irqsave(&dm->vblank_lock, flags);
5726 	dm->vblank_workqueue->dm = dm;
5727 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5728 	dm->vblank_workqueue->enable = enable;
5729 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
5730 	schedule_work(&dm->vblank_workqueue->mall_work);
5731 #endif
5732 
5733 	return 0;
5734 }
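
/*
 * Note that on DCN the enable/disable decision is also handed off to
 * vblank_workqueue and applied from its mall_work handler outside this
 * atomic path; presumably so that DC features tied to vblank state, such
 * as MALL (display self-refresh cache), are reconfigured from a context
 * that may sleep.
 */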
5735 
5736 static int dm_enable_vblank(struct drm_crtc *crtc)
5737 {
5738 	return dm_set_vblank(crtc, true);
5739 }
5740 
5741 static void dm_disable_vblank(struct drm_crtc *crtc)
5742 {
5743 	dm_set_vblank(crtc, false);
5744 }
5745 
5746 /* Only the options currently available to the driver are implemented */
5747 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5748 	.reset = dm_crtc_reset_state,
5749 	.destroy = amdgpu_dm_crtc_destroy,
5750 	.set_config = drm_atomic_helper_set_config,
5751 	.page_flip = drm_atomic_helper_page_flip,
5752 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5753 	.atomic_destroy_state = dm_crtc_destroy_state,
5754 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5755 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5756 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5757 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5758 	.enable_vblank = dm_enable_vblank,
5759 	.disable_vblank = dm_disable_vblank,
5760 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5761 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5762 	.late_register = amdgpu_dm_crtc_late_register,
5763 #endif
5764 };
5765 
5766 static enum drm_connector_status
5767 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5768 {
5769 	bool connected;
5770 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5771 
5772 	/*
5773 	 * Notes:
5774 	 * 1. This interface is NOT called in the context of the HPD irq.
5775 	 * 2. This interface *is* called in the context of a user-mode ioctl,
5776 	 * which makes it a bad place for *any* MST-related activity.
5777 	 */
5778 
5779 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5780 	    !aconnector->fake_enable)
5781 		connected = (aconnector->dc_sink != NULL);
5782 	else
5783 		connected = (aconnector->base.force == DRM_FORCE_ON);
5784 
5785 	update_subconnector_property(aconnector);
5786 
5787 	return (connected ? connector_status_connected :
5788 			connector_status_disconnected);
5789 }
5790 
5791 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5792 					    struct drm_connector_state *connector_state,
5793 					    struct drm_property *property,
5794 					    uint64_t val)
5795 {
5796 	struct drm_device *dev = connector->dev;
5797 	struct amdgpu_device *adev = drm_to_adev(dev);
5798 	struct dm_connector_state *dm_old_state =
5799 		to_dm_connector_state(connector->state);
5800 	struct dm_connector_state *dm_new_state =
5801 		to_dm_connector_state(connector_state);
5802 
5803 	int ret = -EINVAL;
5804 
5805 	if (property == dev->mode_config.scaling_mode_property) {
5806 		enum amdgpu_rmx_type rmx_type;
5807 
5808 		switch (val) {
5809 		case DRM_MODE_SCALE_CENTER:
5810 			rmx_type = RMX_CENTER;
5811 			break;
5812 		case DRM_MODE_SCALE_ASPECT:
5813 			rmx_type = RMX_ASPECT;
5814 			break;
5815 		case DRM_MODE_SCALE_FULLSCREEN:
5816 			rmx_type = RMX_FULL;
5817 			break;
5818 		case DRM_MODE_SCALE_NONE:
5819 		default:
5820 			rmx_type = RMX_OFF;
5821 			break;
5822 		}
5823 
5824 		if (dm_old_state->scaling == rmx_type)
5825 			return 0;
5826 
5827 		dm_new_state->scaling = rmx_type;
5828 		ret = 0;
5829 	} else if (property == adev->mode_info.underscan_hborder_property) {
5830 		dm_new_state->underscan_hborder = val;
5831 		ret = 0;
5832 	} else if (property == adev->mode_info.underscan_vborder_property) {
5833 		dm_new_state->underscan_vborder = val;
5834 		ret = 0;
5835 	} else if (property == adev->mode_info.underscan_property) {
5836 		dm_new_state->underscan_enable = val;
5837 		ret = 0;
5838 	} else if (property == adev->mode_info.abm_level_property) {
5839 		dm_new_state->abm_level = val;
5840 		ret = 0;
5841 	}
5842 
5843 	return ret;
5844 }
5845 
5846 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5847 					    const struct drm_connector_state *state,
5848 					    struct drm_property *property,
5849 					    uint64_t *val)
5850 {
5851 	struct drm_device *dev = connector->dev;
5852 	struct amdgpu_device *adev = drm_to_adev(dev);
5853 	struct dm_connector_state *dm_state =
5854 		to_dm_connector_state(state);
5855 	int ret = -EINVAL;
5856 
5857 	if (property == dev->mode_config.scaling_mode_property) {
5858 		switch (dm_state->scaling) {
5859 		case RMX_CENTER:
5860 			*val = DRM_MODE_SCALE_CENTER;
5861 			break;
5862 		case RMX_ASPECT:
5863 			*val = DRM_MODE_SCALE_ASPECT;
5864 			break;
5865 		case RMX_FULL:
5866 			*val = DRM_MODE_SCALE_FULLSCREEN;
5867 			break;
5868 		case RMX_OFF:
5869 		default:
5870 			*val = DRM_MODE_SCALE_NONE;
5871 			break;
5872 		}
5873 		ret = 0;
5874 	} else if (property == adev->mode_info.underscan_hborder_property) {
5875 		*val = dm_state->underscan_hborder;
5876 		ret = 0;
5877 	} else if (property == adev->mode_info.underscan_vborder_property) {
5878 		*val = dm_state->underscan_vborder;
5879 		ret = 0;
5880 	} else if (property == adev->mode_info.underscan_property) {
5881 		*val = dm_state->underscan_enable;
5882 		ret = 0;
5883 	} else if (property == adev->mode_info.abm_level_property) {
5884 		*val = dm_state->abm_level;
5885 		ret = 0;
5886 	}
5887 
5888 	return ret;
5889 }
5890 
5891 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5892 {
5893 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5894 
5895 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5896 }
5897 
5898 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5899 {
5900 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5901 	const struct dc_link *link = aconnector->dc_link;
5902 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5903 	struct amdgpu_display_manager *dm = &adev->dm;
5904 
5905 	/*
5906 	 * Call only if mst_mgr was initialized before, since it's not done
5907 	 * for all connector types.
5908 	 */
5909 	if (aconnector->mst_mgr.dev)
5910 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5911 
5912 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5913 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5914 
5915 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5916 	    link->type != dc_connection_none &&
5917 	    dm->backlight_dev) {
5918 		backlight_device_unregister(dm->backlight_dev);
5919 		dm->backlight_dev = NULL;
5920 	}
5921 #endif
5922 
5923 	if (aconnector->dc_em_sink)
5924 		dc_sink_release(aconnector->dc_em_sink);
5925 	aconnector->dc_em_sink = NULL;
5926 	if (aconnector->dc_sink)
5927 		dc_sink_release(aconnector->dc_sink);
5928 	aconnector->dc_sink = NULL;
5929 
5930 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5931 	drm_connector_unregister(connector);
5932 	drm_connector_cleanup(connector);
5933 	if (aconnector->i2c) {
5934 		i2c_del_adapter(&aconnector->i2c->base);
5935 		kfree(aconnector->i2c);
5936 	}
5937 	kfree(aconnector->dm_dp_aux.aux.name);
5938 
5939 	kfree(connector);
5940 }
5941 
5942 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5943 {
5944 	struct dm_connector_state *state =
5945 		to_dm_connector_state(connector->state);
5946 
5947 	if (connector->state)
5948 		__drm_atomic_helper_connector_destroy_state(connector->state);
5949 
5950 	kfree(state);
5951 
5952 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5953 
5954 	if (state) {
5955 		state->scaling = RMX_OFF;
5956 		state->underscan_enable = false;
5957 		state->underscan_hborder = 0;
5958 		state->underscan_vborder = 0;
5959 		state->base.max_requested_bpc = 8;
5960 		state->vcpi_slots = 0;
5961 		state->pbn = 0;
5962 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5963 			state->abm_level = amdgpu_dm_abm_level;
5964 
5965 		__drm_atomic_helper_connector_reset(connector, &state->base);
5966 	}
5967 }
5968 
5969 struct drm_connector_state *
5970 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5971 {
5972 	struct dm_connector_state *state =
5973 		to_dm_connector_state(connector->state);
5974 
5975 	struct dm_connector_state *new_state =
5976 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5977 
5978 	if (!new_state)
5979 		return NULL;
5980 
5981 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5982 
5983 	new_state->freesync_capable = state->freesync_capable;
5984 	new_state->abm_level = state->abm_level;
5985 	new_state->scaling = state->scaling;
5986 	new_state->underscan_enable = state->underscan_enable;
5987 	new_state->underscan_hborder = state->underscan_hborder;
5988 	new_state->underscan_vborder = state->underscan_vborder;
5989 	new_state->vcpi_slots = state->vcpi_slots;
5990 	new_state->pbn = state->pbn;
5991 	return &new_state->base;
5992 }
5993 
5994 static int
5995 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5996 {
5997 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5998 		to_amdgpu_dm_connector(connector);
5999 	int r;
6000 
6001 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6002 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6003 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6004 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6005 		if (r)
6006 			return r;
6007 	}
6008 
6009 #if defined(CONFIG_DEBUG_FS)
6010 	connector_debugfs_init(amdgpu_dm_connector);
6011 #endif
6012 
6013 	return 0;
6014 }
6015 
6016 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6017 	.reset = amdgpu_dm_connector_funcs_reset,
6018 	.detect = amdgpu_dm_connector_detect,
6019 	.fill_modes = drm_helper_probe_single_connector_modes,
6020 	.destroy = amdgpu_dm_connector_destroy,
6021 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6022 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6023 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6024 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6025 	.late_register = amdgpu_dm_connector_late_register,
6026 	.early_unregister = amdgpu_dm_connector_unregister
6027 };
6028 
6029 static int get_modes(struct drm_connector *connector)
6030 {
6031 	return amdgpu_dm_connector_get_modes(connector);
6032 }
6033 
6034 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6035 {
6036 	struct dc_sink_init_data init_params = {
6037 			.link = aconnector->dc_link,
6038 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6039 	};
6040 	struct edid *edid;
6041 
6042 	if (!aconnector->base.edid_blob_ptr) {
6043 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6044 				aconnector->base.name);
6045 
6046 		aconnector->base.force = DRM_FORCE_OFF;
6047 		aconnector->base.override_edid = false;
6048 		return;
6049 	}
6050 
6051 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6052 
6053 	aconnector->edid = edid;
6054 
6055 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6056 		aconnector->dc_link,
6057 		(uint8_t *)edid,
6058 		(edid->extensions + 1) * EDID_LENGTH,
6059 		&init_params);
6060 
6061 	if (aconnector->base.force == DRM_FORCE_ON) {
6062 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6063 		aconnector->dc_link->local_sink :
6064 		aconnector->dc_em_sink;
6065 		dc_sink_retain(aconnector->dc_sink);
6066 	}
6067 }
6068 
6069 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6070 {
6071 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6072 
6073 	/*
6074 	 * In case of a headless boot with force-on for a DP managed connector,
6075 	 * these settings have to be != 0 to get an initial modeset.
6076 	 */
6077 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6078 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6079 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6080 	}
6081 
6082 
6083 	aconnector->base.override_edid = true;
6084 	create_eml_sink(aconnector);
6085 }
6086 
6087 static struct dc_stream_state *
6088 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6089 				const struct drm_display_mode *drm_mode,
6090 				const struct dm_connector_state *dm_state,
6091 				const struct dc_stream_state *old_stream)
6092 {
6093 	struct drm_connector *connector = &aconnector->base;
6094 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6095 	struct dc_stream_state *stream;
6096 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6097 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6098 	enum dc_status dc_result = DC_OK;
6099 
6100 	do {
6101 		stream = create_stream_for_sink(aconnector, drm_mode,
6102 						dm_state, old_stream,
6103 						requested_bpc);
6104 		if (stream == NULL) {
6105 			DRM_ERROR("Failed to create stream for sink!\n");
6106 			break;
6107 		}
6108 
6109 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6110 
6111 		if (dc_result != DC_OK) {
6112 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6113 				      drm_mode->hdisplay,
6114 				      drm_mode->vdisplay,
6115 				      drm_mode->clock,
6116 				      dc_result,
6117 				      dc_status_to_str(dc_result));
6118 
6119 			dc_stream_release(stream);
6120 			stream = NULL;
6121 			requested_bpc -= 2; /* lower bpc to retry validation */
6122 		}
6123 
6124 	} while (stream == NULL && requested_bpc >= 6);
6125 
6126 	return stream;
6127 }
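
/*
 * Example of the retry above (limits assumed for illustration): with
 * max_requested_bpc == 10 the first attempt validates the stream at
 * 10 bpc; if DC rejects it, e.g. for bandwidth reasons, the mode is
 * retried at 8 and then 6 bpc before giving up and returning NULL.
 */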
6128 
6129 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6130 				   struct drm_display_mode *mode)
6131 {
6132 	int result = MODE_ERROR;
6133 	struct dc_sink *dc_sink;
6134 	/* TODO: Unhardcode stream count */
6135 	struct dc_stream_state *stream;
6136 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6137 
6138 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6139 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6140 		return result;
6141 
6142 	/*
6143 	 * Only run this the first time mode_valid is called, to initialize
6144 	 * EDID mgmt.
6145 	 */
6146 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6147 		!aconnector->dc_em_sink)
6148 		handle_edid_mgmt(aconnector);
6149 
6150 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6151 
6152 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6153 				aconnector->base.force != DRM_FORCE_ON) {
6154 		DRM_ERROR("dc_sink is NULL!\n");
6155 		goto fail;
6156 	}
6157 
6158 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6159 	if (stream) {
6160 		dc_stream_release(stream);
6161 		result = MODE_OK;
6162 	}
6163 
6164 fail:
6165 	/* TODO: error handling */
6166 	return result;
6167 }
6168 
6169 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6170 				struct dc_info_packet *out)
6171 {
6172 	struct hdmi_drm_infoframe frame;
6173 	unsigned char buf[30]; /* 26 + 4 */
6174 	ssize_t len;
6175 	int ret, i;
6176 
6177 	memset(out, 0, sizeof(*out));
6178 
6179 	if (!state->hdr_output_metadata)
6180 		return 0;
6181 
6182 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6183 	if (ret)
6184 		return ret;
6185 
6186 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6187 	if (len < 0)
6188 		return (int)len;
6189 
6190 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6191 	if (len != 30)
6192 		return -EINVAL;
6193 
6194 	/* Prepare the infopacket for DC. */
6195 	switch (state->connector->connector_type) {
6196 	case DRM_MODE_CONNECTOR_HDMIA:
6197 		out->hb0 = 0x87; /* type */
6198 		out->hb1 = 0x01; /* version */
6199 		out->hb2 = 0x1A; /* length */
6200 		out->sb[0] = buf[3]; /* checksum */
6201 		i = 1;
6202 		break;
6203 
6204 	case DRM_MODE_CONNECTOR_DisplayPort:
6205 	case DRM_MODE_CONNECTOR_eDP:
6206 		out->hb0 = 0x00; /* sdp id, zero */
6207 		out->hb1 = 0x87; /* type */
6208 		out->hb2 = 0x1D; /* payload len - 1 */
6209 		out->hb3 = (0x13 << 2); /* sdp version */
6210 		out->sb[0] = 0x01; /* version */
6211 		out->sb[1] = 0x1A; /* length */
6212 		i = 2;
6213 		break;
6214 
6215 	default:
6216 		return -EINVAL;
6217 	}
6218 
6219 	memcpy(&out->sb[i], &buf[4], 26);
6220 	out->valid = true;
6221 
6222 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6223 		       sizeof(out->sb), false);
6224 
6225 	return 0;
6226 }
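
/*
 * Resulting layout, HDMI case (bytes shown for illustration): header
 * 0x87/0x01/0x1A, sb[0] = infoframe checksum, sb[1..26] = the 26 bytes of
 * static metadata (EOTF, primaries, luminance, MaxCLL/MaxFALL). For DP the
 * same 26 bytes follow the version/length bytes inside an SDP instead.
 */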
6227 
6228 static bool
6229 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6230 			  const struct drm_connector_state *new_state)
6231 {
6232 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6233 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6234 
6235 	if (old_blob != new_blob) {
6236 		if (old_blob && new_blob &&
6237 		    old_blob->length == new_blob->length)
6238 			return memcmp(old_blob->data, new_blob->data,
6239 				      old_blob->length);
6240 
6241 		return true;
6242 	}
6243 
6244 	return false;
6245 }
6246 
6247 static int
6248 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6249 				 struct drm_atomic_state *state)
6250 {
6251 	struct drm_connector_state *new_con_state =
6252 		drm_atomic_get_new_connector_state(state, conn);
6253 	struct drm_connector_state *old_con_state =
6254 		drm_atomic_get_old_connector_state(state, conn);
6255 	struct drm_crtc *crtc = new_con_state->crtc;
6256 	struct drm_crtc_state *new_crtc_state;
6257 	int ret;
6258 
6259 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6260 
6261 	if (!crtc)
6262 		return 0;
6263 
6264 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6265 		struct dc_info_packet hdr_infopacket;
6266 
6267 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6268 		if (ret)
6269 			return ret;
6270 
6271 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6272 		if (IS_ERR(new_crtc_state))
6273 			return PTR_ERR(new_crtc_state);
6274 
6275 		/*
6276 		 * DC considers the stream backends changed if the
6277 		 * static metadata changes. Forcing the modeset also
6278 		 * gives a simple way for userspace to switch from
6279 		 * 8bpc to 10bpc when setting the metadata to enter
6280 		 * or exit HDR.
6281 		 *
6282 		 * Changing the static metadata after it's been
6283 		 * set is permissible, however. So only force a
6284 		 * modeset if we're entering or exiting HDR.
6285 		 */
6286 		new_crtc_state->mode_changed =
6287 			!old_con_state->hdr_output_metadata ||
6288 			!new_con_state->hdr_output_metadata;
6289 	}
6290 
6291 	return 0;
6292 }
6293 
6294 static const struct drm_connector_helper_funcs
6295 amdgpu_dm_connector_helper_funcs = {
6296 	/*
6297 	 * If a second, bigger display is hotplugged in fbcon mode, its higher
6298 	 * resolution modes are filtered out by drm_mode_validate_size() and
6299 	 * are missing after the user starts lightdm. So we need to renew the
6300 	 * mode list in the get_modes callback, not just return the mode count.
6301 	 */
6302 	.get_modes = get_modes,
6303 	.mode_valid = amdgpu_dm_connector_mode_valid,
6304 	.atomic_check = amdgpu_dm_connector_atomic_check,
6305 };
6306 
6307 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6308 {
6309 }
6310 
6311 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6312 {
6313 	struct drm_atomic_state *state = new_crtc_state->state;
6314 	struct drm_plane *plane;
6315 	int num_active = 0;
6316 
6317 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6318 		struct drm_plane_state *new_plane_state;
6319 
6320 		/* Cursor planes are "fake". */
6321 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6322 			continue;
6323 
6324 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6325 
6326 		if (!new_plane_state) {
6327 			/*
6328 			 * The plane is enabled on the CRTC and hasn't changed
6329 			 * state. This means that it previously passed
6330 			 * validation and is therefore enabled.
6331 			 */
6332 			num_active += 1;
6333 			continue;
6334 		}
6335 
6336 		/* We need a framebuffer to be considered enabled. */
6337 		num_active += (new_plane_state->fb != NULL);
6338 	}
6339 
6340 	return num_active;
6341 }
6342 
6343 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6344 					 struct drm_crtc_state *new_crtc_state)
6345 {
6346 	struct dm_crtc_state *dm_new_crtc_state =
6347 		to_dm_crtc_state(new_crtc_state);
6348 
6349 	dm_new_crtc_state->active_planes = 0;
6350 
6351 	if (!dm_new_crtc_state->stream)
6352 		return;
6353 
6354 	dm_new_crtc_state->active_planes =
6355 		count_crtc_active_planes(new_crtc_state);
6356 }
6357 
6358 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6359 				       struct drm_atomic_state *state)
6360 {
6361 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6362 									  crtc);
6363 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6364 	struct dc *dc = adev->dm.dc;
6365 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6366 	int ret = -EINVAL;
6367 
6368 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6369 
6370 	dm_update_crtc_active_planes(crtc, crtc_state);
6371 
6372 	if (unlikely(!dm_crtc_state->stream &&
6373 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6374 		WARN_ON(1);
6375 		return ret;
6376 	}
6377 
6378 	/*
6379 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6380 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6381 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6382 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6383 	 */
6384 	if (crtc_state->enable &&
6385 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6386 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6387 		return -EINVAL;
6388 	}
6389 
6390 	/* In some use cases, like reset, no stream is attached */
6391 	if (!dm_crtc_state->stream)
6392 		return 0;
6393 
6394 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6395 		return 0;
6396 
6397 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6398 	return ret;
6399 }
6400 
6401 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6402 				      const struct drm_display_mode *mode,
6403 				      struct drm_display_mode *adjusted_mode)
6404 {
6405 	return true;
6406 }
6407 
6408 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6409 	.disable = dm_crtc_helper_disable,
6410 	.atomic_check = dm_crtc_helper_atomic_check,
6411 	.mode_fixup = dm_crtc_helper_mode_fixup,
6412 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6413 };
6414 
6415 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6416 {
6417 
6418 }
6419 
6420 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6421 {
6422 	switch (display_color_depth) {
6423 	case COLOR_DEPTH_666:
6424 		return 6;
6425 	case COLOR_DEPTH_888:
6426 		return 8;
6427 	case COLOR_DEPTH_101010:
6428 		return 10;
6429 	case COLOR_DEPTH_121212:
6430 		return 12;
6431 	case COLOR_DEPTH_141414:
6432 		return 14;
6433 	case COLOR_DEPTH_161616:
6434 		return 16;
6435 	default:
6436 		break;
6437 	}
6438 	return 0;
6439 }
6440 
6441 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6442 					  struct drm_crtc_state *crtc_state,
6443 					  struct drm_connector_state *conn_state)
6444 {
6445 	struct drm_atomic_state *state = crtc_state->state;
6446 	struct drm_connector *connector = conn_state->connector;
6447 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6448 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6449 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6450 	struct drm_dp_mst_topology_mgr *mst_mgr;
6451 	struct drm_dp_mst_port *mst_port;
6452 	enum dc_color_depth color_depth;
6453 	int clock, bpp = 0;
6454 	bool is_y420 = false;
6455 
6456 	if (!aconnector->port || !aconnector->dc_sink)
6457 		return 0;
6458 
6459 	mst_port = aconnector->port;
6460 	mst_mgr = &aconnector->mst_port->mst_mgr;
6461 
6462 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6463 		return 0;
6464 
6465 	if (!state->duplicated) {
6466 		int max_bpc = conn_state->max_requested_bpc;
6467 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6468 				aconnector->force_yuv420_output;
6469 		color_depth = convert_color_depth_from_display_info(connector,
6470 								    is_y420,
6471 								    max_bpc);
6472 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6473 		clock = adjusted_mode->clock;
6474 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6475 	}
6476 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6477 									   mst_mgr,
6478 									   mst_port,
6479 									   dm_new_connector_state->pbn,
6480 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6481 	if (dm_new_connector_state->vcpi_slots < 0) {
6482 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6483 		return dm_new_connector_state->vcpi_slots;
6484 	}
6485 	return 0;
6486 }
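
/*
 * Rough PBN intuition (numbers assumed for illustration): one PBN unit is
 * 54/64 MBps of MST payload bandwidth. A 1080p60 stream at 24 bpp needs
 * 148500 kHz * 3 bytes = ~445.5 MBps, so drm_dp_calc_pbn_mode() yields on
 * the order of 530 PBN after its margin, which the atomic helper then
 * divides by dm_mst_get_pbn_divider() to obtain VCPI time slots.
 */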
6487 
6488 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6489 	.disable = dm_encoder_helper_disable,
6490 	.atomic_check = dm_encoder_helper_atomic_check
6491 };
6492 
6493 #if defined(CONFIG_DRM_AMD_DC_DCN)
6494 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6495 					    struct dc_state *dc_state)
6496 {
6497 	struct dc_stream_state *stream = NULL;
6498 	struct drm_connector *connector;
6499 	struct drm_connector_state *new_con_state, *old_con_state;
6500 	struct amdgpu_dm_connector *aconnector;
6501 	struct dm_connector_state *dm_conn_state;
6502 	int i, j, clock, bpp;
6503 	int vcpi, pbn_div, pbn = 0;
6504 
6505 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6506 
6507 		aconnector = to_amdgpu_dm_connector(connector);
6508 
6509 		if (!aconnector->port)
6510 			continue;
6511 
6512 		if (!new_con_state || !new_con_state->crtc)
6513 			continue;
6514 
6515 		dm_conn_state = to_dm_connector_state(new_con_state);
6516 
6517 		for (j = 0; j < dc_state->stream_count; j++) {
6518 			stream = dc_state->streams[j];
6519 			if (!stream)
6520 				continue;
6521 
6522 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6523 				break;
6524 
6525 			stream = NULL;
6526 		}
6527 
6528 		if (!stream)
6529 			continue;
6530 
6531 		if (stream->timing.flags.DSC != 1) {
6532 			drm_dp_mst_atomic_enable_dsc(state,
6533 						     aconnector->port,
6534 						     dm_conn_state->pbn,
6535 						     0,
6536 						     false);
6537 			continue;
6538 		}
6539 
6540 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6541 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6542 		clock = stream->timing.pix_clk_100hz / 10;
6543 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6544 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6545 						    aconnector->port,
6546 						    pbn, pbn_div,
6547 						    true);
6548 		if (vcpi < 0)
6549 			return vcpi;
6550 
6551 		dm_conn_state->pbn = pbn;
6552 		dm_conn_state->vcpi_slots = vcpi;
6553 	}
6554 	return 0;
6555 }
6556 #endif
6557 
6558 static void dm_drm_plane_reset(struct drm_plane *plane)
6559 {
6560 	struct dm_plane_state *amdgpu_state = NULL;
6561 
6562 	if (plane->state)
6563 		plane->funcs->atomic_destroy_state(plane, plane->state);
6564 
6565 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6566 	WARN_ON(amdgpu_state == NULL);
6567 
6568 	if (amdgpu_state)
6569 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6570 }
6571 
6572 static struct drm_plane_state *
6573 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6574 {
6575 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6576 
6577 	old_dm_plane_state = to_dm_plane_state(plane->state);
6578 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6579 	if (!dm_plane_state)
6580 		return NULL;
6581 
6582 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6583 
6584 	if (old_dm_plane_state->dc_state) {
6585 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6586 		dc_plane_state_retain(dm_plane_state->dc_state);
6587 	}
6588 
6589 	return &dm_plane_state->base;
6590 }
6591 
6592 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6593 				struct drm_plane_state *state)
6594 {
6595 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6596 
6597 	if (dm_plane_state->dc_state)
6598 		dc_plane_state_release(dm_plane_state->dc_state);
6599 
6600 	drm_atomic_helper_plane_destroy_state(plane, state);
6601 }
6602 
6603 static const struct drm_plane_funcs dm_plane_funcs = {
6604 	.update_plane	= drm_atomic_helper_update_plane,
6605 	.disable_plane	= drm_atomic_helper_disable_plane,
6606 	.destroy	= drm_primary_helper_destroy,
6607 	.reset = dm_drm_plane_reset,
6608 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6609 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6610 	.format_mod_supported = dm_plane_format_mod_supported,
6611 };
6612 
6613 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6614 				      struct drm_plane_state *new_state)
6615 {
6616 	struct amdgpu_framebuffer *afb;
6617 	struct drm_gem_object *obj;
6618 	struct amdgpu_device *adev;
6619 	struct amdgpu_bo *rbo;
6620 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6621 	struct list_head list;
6622 	struct ttm_validate_buffer tv;
6623 	struct ww_acquire_ctx ticket;
6624 	uint32_t domain;
6625 	int r;
6626 
6627 	if (!new_state->fb) {
6628 		DRM_DEBUG_DRIVER("No FB bound\n");
6629 		return 0;
6630 	}
6631 
6632 	afb = to_amdgpu_framebuffer(new_state->fb);
6633 	obj = new_state->fb->obj[0];
6634 	rbo = gem_to_amdgpu_bo(obj);
6635 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6636 	INIT_LIST_HEAD(&list);
6637 
6638 	tv.bo = &rbo->tbo;
6639 	tv.num_shared = 1;
6640 	list_add(&tv.head, &list);
6641 
6642 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6643 	if (r) {
6644 		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
6645 		return r;
6646 	}
6647 
6648 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6649 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6650 	else
6651 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6652 
6653 	r = amdgpu_bo_pin(rbo, domain);
6654 	if (unlikely(r != 0)) {
6655 		if (r != -ERESTARTSYS)
6656 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6657 		ttm_eu_backoff_reservation(&ticket, &list);
6658 		return r;
6659 	}
6660 
6661 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6662 	if (unlikely(r != 0)) {
6663 		amdgpu_bo_unpin(rbo);
6664 		ttm_eu_backoff_reservation(&ticket, &list);
6665 		DRM_ERROR("%p bind failed\n", rbo);
6666 		return r;
6667 	}
6668 
6669 	ttm_eu_backoff_reservation(&ticket, &list);
6670 
6671 	afb->address = amdgpu_bo_gpu_offset(rbo);
6672 
6673 	amdgpu_bo_ref(rbo);
6674 
6675 	/*
6676 	 * We don't do surface updates on planes that have been newly created,
6677 	 * but we also don't have the afb->address during atomic check.
6678 	 *
6679 	 * Fill in buffer attributes depending on the address here, but only on
6680 	 * newly created planes since they're not being used by DC yet and this
6681 	 * won't modify global state.
6682 	 */
6683 	dm_plane_state_old = to_dm_plane_state(plane->state);
6684 	dm_plane_state_new = to_dm_plane_state(new_state);
6685 
6686 	if (dm_plane_state_new->dc_state &&
6687 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6688 		struct dc_plane_state *plane_state =
6689 			dm_plane_state_new->dc_state;
6690 		bool force_disable_dcc = !plane_state->dcc.enable;
6691 
6692 		fill_plane_buffer_attributes(
6693 			adev, afb, plane_state->format, plane_state->rotation,
6694 			afb->tiling_flags,
6695 			&plane_state->tiling_info, &plane_state->plane_size,
6696 			&plane_state->dcc, &plane_state->address,
6697 			afb->tmz_surface, force_disable_dcc);
6698 	}
6699 
6700 	return 0;
6701 }
6702 
6703 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6704 				       struct drm_plane_state *old_state)
6705 {
6706 	struct amdgpu_bo *rbo;
6707 	int r;
6708 
6709 	if (!old_state->fb)
6710 		return;
6711 
6712 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6713 	r = amdgpu_bo_reserve(rbo, false);
6714 	if (unlikely(r)) {
6715 		DRM_ERROR("failed to reserve rbo before unpin\n");
6716 		return;
6717 	}
6718 
6719 	amdgpu_bo_unpin(rbo);
6720 	amdgpu_bo_unreserve(rbo);
6721 	amdgpu_bo_unref(&rbo);
6722 }
6723 
6724 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6725 				       struct drm_crtc_state *new_crtc_state)
6726 {
6727 	struct drm_framebuffer *fb = state->fb;
6728 	int min_downscale, max_upscale;
6729 	int min_scale = 0;
6730 	int max_scale = INT_MAX;
6731 
6732 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6733 	if (fb && state->crtc) {
6734 		/* Validate viewport to cover the case when only the position changes */
6735 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6736 			int viewport_width = state->crtc_w;
6737 			int viewport_height = state->crtc_h;
6738 
6739 			if (state->crtc_x < 0)
6740 				viewport_width += state->crtc_x;
6741 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6742 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6743 
6744 			if (state->crtc_y < 0)
6745 				viewport_height += state->crtc_y;
6746 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6747 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6748 
6749 			if (viewport_width < 0 || viewport_height < 0) {
6750 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6751 				return -EINVAL;
6752 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6753 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6754 				return -EINVAL;
6755 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
6756 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6757 				return -EINVAL;
6758 			}
6759 
6760 		}
6761 
6762 		/* Get min/max allowed scaling factors from plane caps. */
6763 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6764 					     &min_downscale, &max_upscale);
6765 		/*
6766 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6767 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6768 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6769 		 */
6770 		min_scale = (1000 << 16) / max_upscale;
6771 		max_scale = (1000 << 16) / min_downscale;
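		/*
		 * Worked example (illustrative caps, not queried from DC):
		 * max_upscale = 16000 (16x in dc's 1.0 == 1000 units) gives
		 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in drm's
		 * 16.16 fixed point; min_downscale = 250 (0.25x) gives
		 * max_scale = (1000 << 16) / 250 = 262144, i.e. 4.0.
		 */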
6772 	}
6773 
6774 	return drm_atomic_helper_check_plane_state(
6775 		state, new_crtc_state, min_scale, max_scale, true, true);
6776 }
6777 
6778 static int dm_plane_atomic_check(struct drm_plane *plane,
6779 				 struct drm_atomic_state *state)
6780 {
6781 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6782 										 plane);
6783 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6784 	struct dc *dc = adev->dm.dc;
6785 	struct dm_plane_state *dm_plane_state;
6786 	struct dc_scaling_info scaling_info;
6787 	struct drm_crtc_state *new_crtc_state;
6788 	int ret;
6789 
6790 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6791 
6792 	dm_plane_state = to_dm_plane_state(new_plane_state);
6793 
6794 	if (!dm_plane_state->dc_state)
6795 		return 0;
6796 
6797 	new_crtc_state =
6798 		drm_atomic_get_new_crtc_state(state,
6799 					      new_plane_state->crtc);
6800 	if (!new_crtc_state)
6801 		return -EINVAL;
6802 
6803 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6804 	if (ret)
6805 		return ret;
6806 
6807 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6808 	if (ret)
6809 		return ret;
6810 
6811 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6812 		return 0;
6813 
6814 	return -EINVAL;
6815 }
6816 
6817 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6818 				       struct drm_atomic_state *state)
6819 {
6820 	/* Only support async updates on cursor planes. */
6821 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6822 		return -EINVAL;
6823 
6824 	return 0;
6825 }
6826 
6827 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6828 					 struct drm_atomic_state *state)
6829 {
6830 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6831 									   plane);
6832 	struct drm_plane_state *old_state =
6833 		drm_atomic_get_old_plane_state(state, plane);
6834 
6835 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6836 
6837 	swap(plane->state->fb, new_state->fb);
6838 
6839 	plane->state->src_x = new_state->src_x;
6840 	plane->state->src_y = new_state->src_y;
6841 	plane->state->src_w = new_state->src_w;
6842 	plane->state->src_h = new_state->src_h;
6843 	plane->state->crtc_x = new_state->crtc_x;
6844 	plane->state->crtc_y = new_state->crtc_y;
6845 	plane->state->crtc_w = new_state->crtc_w;
6846 	plane->state->crtc_h = new_state->crtc_h;
6847 
6848 	handle_cursor_update(plane, old_state);
6849 }
6850 
6851 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6852 	.prepare_fb = dm_plane_helper_prepare_fb,
6853 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6854 	.atomic_check = dm_plane_atomic_check,
6855 	.atomic_async_check = dm_plane_atomic_async_check,
6856 	.atomic_async_update = dm_plane_atomic_async_update
6857 };
6858 
6859 /*
6860  * TODO: these are currently initialized to rgb formats only.
6861  * For future use cases we should either initialize them dynamically based on
6862  * plane capabilities, or initialize this array to all formats, so the
6863  * internal drm check will succeed, and let DC implement the proper check.
6864  */
6865 static const uint32_t rgb_formats[] = {
6866 	DRM_FORMAT_XRGB8888,
6867 	DRM_FORMAT_ARGB8888,
6868 	DRM_FORMAT_RGBA8888,
6869 	DRM_FORMAT_XRGB2101010,
6870 	DRM_FORMAT_XBGR2101010,
6871 	DRM_FORMAT_ARGB2101010,
6872 	DRM_FORMAT_ABGR2101010,
6873 	DRM_FORMAT_XBGR8888,
6874 	DRM_FORMAT_ABGR8888,
6875 	DRM_FORMAT_RGB565,
6876 };
6877 
6878 static const uint32_t overlay_formats[] = {
6879 	DRM_FORMAT_XRGB8888,
6880 	DRM_FORMAT_ARGB8888,
6881 	DRM_FORMAT_RGBA8888,
6882 	DRM_FORMAT_XBGR8888,
6883 	DRM_FORMAT_ABGR8888,
6884 	DRM_FORMAT_RGB565
6885 };
6886 
6887 static const u32 cursor_formats[] = {
6888 	DRM_FORMAT_ARGB8888
6889 };
6890 
6891 static int get_plane_formats(const struct drm_plane *plane,
6892 			     const struct dc_plane_cap *plane_cap,
6893 			     uint32_t *formats, int max_formats)
6894 {
6895 	int i, num_formats = 0;
6896 
6897 	/*
6898 	 * TODO: Query support for each group of formats directly from
6899 	 * DC plane caps. This will require adding more formats to the
6900 	 * caps list.
6901 	 */
6902 
6903 	switch (plane->type) {
6904 	case DRM_PLANE_TYPE_PRIMARY:
6905 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6906 			if (num_formats >= max_formats)
6907 				break;
6908 
6909 			formats[num_formats++] = rgb_formats[i];
6910 		}
6911 
6912 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6913 			formats[num_formats++] = DRM_FORMAT_NV12;
6914 		if (plane_cap && plane_cap->pixel_format_support.p010)
6915 			formats[num_formats++] = DRM_FORMAT_P010;
6916 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6917 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6918 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6919 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6920 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6921 		}
6922 		break;
6923 
6924 	case DRM_PLANE_TYPE_OVERLAY:
6925 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6926 			if (num_formats >= max_formats)
6927 				break;
6928 
6929 			formats[num_formats++] = overlay_formats[i];
6930 		}
6931 		break;
6932 
6933 	case DRM_PLANE_TYPE_CURSOR:
6934 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6935 			if (num_formats >= max_formats)
6936 				break;
6937 
6938 			formats[num_formats++] = cursor_formats[i];
6939 		}
6940 		break;
6941 	}
6942 
6943 	return num_formats;
6944 }
6945 
6946 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6947 				struct drm_plane *plane,
6948 				unsigned long possible_crtcs,
6949 				const struct dc_plane_cap *plane_cap)
6950 {
6951 	uint32_t formats[32];
6952 	int num_formats;
6953 	int res = -EPERM;
6954 	unsigned int supported_rotations;
6955 	uint64_t *modifiers = NULL;
6956 
6957 	num_formats = get_plane_formats(plane, plane_cap, formats,
6958 					ARRAY_SIZE(formats));
6959 
6960 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6961 	if (res)
6962 		return res;
6963 
6964 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6965 				       &dm_plane_funcs, formats, num_formats,
6966 				       modifiers, plane->type, NULL);
6967 	kfree(modifiers);
6968 	if (res)
6969 		return res;
6970 
6971 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6972 	    plane_cap && plane_cap->per_pixel_alpha) {
6973 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6974 					  BIT(DRM_MODE_BLEND_PREMULTI);
6975 
6976 		drm_plane_create_alpha_property(plane);
6977 		drm_plane_create_blend_mode_property(plane, blend_caps);
6978 	}
6979 
6980 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6981 	    plane_cap &&
6982 	    (plane_cap->pixel_format_support.nv12 ||
6983 	     plane_cap->pixel_format_support.p010)) {
6984 		/* This only affects YUV formats. */
6985 		drm_plane_create_color_properties(
6986 			plane,
6987 			BIT(DRM_COLOR_YCBCR_BT601) |
6988 			BIT(DRM_COLOR_YCBCR_BT709) |
6989 			BIT(DRM_COLOR_YCBCR_BT2020),
6990 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6991 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6992 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6993 	}
6994 
6995 	supported_rotations =
6996 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6997 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6998 
6999 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7000 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7001 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7002 						   supported_rotations);
7003 
7004 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7005 
7006 	/* Create (reset) the plane state */
7007 	if (plane->funcs->reset)
7008 		plane->funcs->reset(plane);
7009 
7010 	return 0;
7011 }
7012 
7013 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7014 			       struct drm_plane *plane,
7015 			       uint32_t crtc_index)
7016 {
7017 	struct amdgpu_crtc *acrtc = NULL;
7018 	struct drm_plane *cursor_plane;
7019 
7020 	int res = -ENOMEM;
7021 
7022 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7023 	if (!cursor_plane)
7024 		goto fail;
7025 
7026 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7027 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;

7029 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7030 	if (!acrtc)
7031 		goto fail;
7032 
7033 	res = drm_crtc_init_with_planes(
7034 			dm->ddev,
7035 			&acrtc->base,
7036 			plane,
7037 			cursor_plane,
7038 			&amdgpu_dm_crtc_funcs, NULL);
7039 
7040 	if (res)
7041 		goto fail;
7042 
7043 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7044 
7045 	/* Create (reset) the CRTC state */
7046 	if (acrtc->base.funcs->reset)
7047 		acrtc->base.funcs->reset(&acrtc->base);
7048 
7049 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7050 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7051 
7052 	acrtc->crtc_id = crtc_index;
7053 	acrtc->base.enabled = false;
7054 	acrtc->otg_inst = -1;
7055 
7056 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7057 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7058 				   true, MAX_COLOR_LUT_ENTRIES);
7059 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7060 
7061 	return 0;
7062 
7063 fail:
7064 	kfree(acrtc);
7065 	kfree(cursor_plane);
7066 	return res;
7067 }
7068 
7069 
7070 static int to_drm_connector_type(enum signal_type st)
7071 {
7072 	switch (st) {
7073 	case SIGNAL_TYPE_HDMI_TYPE_A:
7074 		return DRM_MODE_CONNECTOR_HDMIA;
7075 	case SIGNAL_TYPE_EDP:
7076 		return DRM_MODE_CONNECTOR_eDP;
7077 	case SIGNAL_TYPE_LVDS:
7078 		return DRM_MODE_CONNECTOR_LVDS;
7079 	case SIGNAL_TYPE_RGB:
7080 		return DRM_MODE_CONNECTOR_VGA;
7081 	case SIGNAL_TYPE_DISPLAY_PORT:
7082 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7083 		return DRM_MODE_CONNECTOR_DisplayPort;
7084 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7085 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7086 		return DRM_MODE_CONNECTOR_DVID;
7087 	case SIGNAL_TYPE_VIRTUAL:
7088 		return DRM_MODE_CONNECTOR_VIRTUAL;
7089 
7090 	default:
7091 		return DRM_MODE_CONNECTOR_Unknown;
7092 	}
7093 }
7094 
7095 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7096 {
7097 	struct drm_encoder *encoder;
7098 
7099 	/* There is only one encoder per connector */
7100 	drm_connector_for_each_possible_encoder(connector, encoder)
7101 		return encoder;
7102 
7103 	return NULL;
7104 }
7105 
7106 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7107 {
7108 	struct drm_encoder *encoder;
7109 	struct amdgpu_encoder *amdgpu_encoder;
7110 
7111 	encoder = amdgpu_dm_connector_to_encoder(connector);
7112 
7113 	if (encoder == NULL)
7114 		return;
7115 
7116 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7117 
7118 	amdgpu_encoder->native_mode.clock = 0;
7119 
7120 	if (!list_empty(&connector->probed_modes)) {
7121 		struct drm_display_mode *preferred_mode = NULL;
7122 
7123 		list_for_each_entry(preferred_mode,
7124 				    &connector->probed_modes,
7125 				    head) {
7126 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7127 				amdgpu_encoder->native_mode = *preferred_mode;
7128 
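			/*
			 * The caller sorted probed_modes, so the preferred
			 * mode (if any) sits at the head of the list; only
			 * the first entry needs to be examined before
			 * bailing out.
			 */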
7129 			break;
7130 		}
7131 
7132 	}
7133 }
7134 
7135 static struct drm_display_mode *
7136 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7137 			     char *name,
7138 			     int hdisplay, int vdisplay)
7139 {
7140 	struct drm_device *dev = encoder->dev;
7141 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7142 	struct drm_display_mode *mode = NULL;
7143 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7144 
7145 	mode = drm_mode_duplicate(dev, native_mode);
7146 
7147 	if (mode == NULL)
7148 		return NULL;
7149 
7150 	mode->hdisplay = hdisplay;
7151 	mode->vdisplay = vdisplay;
7152 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7153 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7154 
7155 	return mode;
7157 }
7158 
7159 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7160 						 struct drm_connector *connector)
7161 {
7162 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7163 	struct drm_display_mode *mode = NULL;
7164 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7165 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7166 				to_amdgpu_dm_connector(connector);
7167 	int i;
7168 	int n;
7169 	struct mode_size {
7170 		char name[DRM_DISPLAY_MODE_LEN];
7171 		int w;
7172 		int h;
7173 	} common_modes[] = {
7174 		{  "640x480",  640,  480},
7175 		{  "800x600",  800,  600},
7176 		{ "1024x768", 1024,  768},
7177 		{ "1280x720", 1280,  720},
7178 		{ "1280x800", 1280,  800},
7179 		{"1280x1024", 1280, 1024},
7180 		{ "1440x900", 1440,  900},
7181 		{"1680x1050", 1680, 1050},
7182 		{"1600x1200", 1600, 1200},
7183 		{"1920x1080", 1920, 1080},
7184 		{"1920x1200", 1920, 1200}
7185 	};
7186 
7187 	n = ARRAY_SIZE(common_modes);
7188 
7189 	for (i = 0; i < n; i++) {
7190 		struct drm_display_mode *curmode = NULL;
7191 		bool mode_existed = false;
7192 
7193 		if (common_modes[i].w > native_mode->hdisplay ||
7194 		    common_modes[i].h > native_mode->vdisplay ||
7195 		   (common_modes[i].w == native_mode->hdisplay &&
7196 		    common_modes[i].h == native_mode->vdisplay))
7197 			continue;
7198 
7199 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7200 			if (common_modes[i].w == curmode->hdisplay &&
7201 			    common_modes[i].h == curmode->vdisplay) {
7202 				mode_existed = true;
7203 				break;
7204 			}
7205 		}
7206 
7207 		if (mode_existed)
7208 			continue;
7209 
7210 		mode = amdgpu_dm_create_common_mode(encoder,
7211 				common_modes[i].name, common_modes[i].w,
7212 				common_modes[i].h);
		if (!mode)
			continue;

7213 		drm_mode_probed_add(connector, mode);
7214 		amdgpu_dm_connector->num_modes++;
7215 	}
7216 }
7217 
7218 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7219 					      struct edid *edid)
7220 {
7221 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7222 			to_amdgpu_dm_connector(connector);
7223 
7224 	if (edid) {
7225 		/* empty probed_modes */
7226 		INIT_LIST_HEAD(&connector->probed_modes);
7227 		amdgpu_dm_connector->num_modes =
7228 				drm_add_edid_modes(connector, edid);
7229 
7230 		/* Sort the probed modes before calling
7231 		 * amdgpu_dm_get_native_mode(), since an EDID can have
7232 		 * more than one preferred mode. Modes later in the
7233 		 * probed list can carry a higher, preferred resolution:
7234 		 * for example, 3840x2160 in the base EDID preferred
7235 		 * timing and 4096x2160 in a later DisplayID (DID)
7236 		 * extension block.
7237 		 */
7238 		drm_mode_sort(&connector->probed_modes);
7239 		amdgpu_dm_get_native_mode(connector);
7240 
7241 		/* Freesync capabilities are reset by calling
7242 		 * drm_add_edid_modes() and need to be
7243 		 * restored here.
7244 		 */
7245 		amdgpu_dm_update_freesync_caps(connector, edid);
7246 	} else {
7247 		amdgpu_dm_connector->num_modes = 0;
7248 	}
7249 }
7250 
7251 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7252 			      struct drm_display_mode *mode)
7253 {
7254 	struct drm_display_mode *m;
7255 
7256 	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7257 		if (drm_mode_equal(m, mode))
7258 			return true;
7259 	}
7260 
7261 	return false;
7262 }
7263 
7264 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7265 {
7266 	const struct drm_display_mode *m;
7267 	struct drm_display_mode *new_mode;
7268 	uint i;
7269 	uint32_t new_modes_count = 0;
7270 
7271 	/* Standard FPS values
7272 	 *
7273 	 * 23.976   - TV/NTSC
7274 	 * 24       - Cinema
7275 	 * 25       - TV/PAL
7276 	 * 29.97    - TV/NTSC
7277 	 * 30       - TV/NTSC
7278 	 * 48       - Cinema HFR
7279 	 * 50       - TV/PAL
7280 	 * 60       - Commonly used
7281 	 * 48,72,96 - Multiples of 24
7282 	 */
7283 	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7284 					 48000, 50000, 60000, 72000, 96000 };
7285 
7286 	/*
7287 	 * Find the mode with the highest refresh rate at the same resolution
7288 	 * as the preferred mode. Some monitors report a preferred mode whose
7289 	 * refresh rate is lower than the highest rate they support.
7290 	 */
7291 
7292 	m = get_highest_refresh_rate_mode(aconnector, true);
7293 	if (!m)
7294 		return 0;
7295 
7296 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7297 		uint64_t target_vtotal, target_vtotal_diff;
7298 		uint64_t num, den;
7299 
7300 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7301 			continue;
7302 
7303 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7304 		    common_rates[i] > aconnector->max_vfreq * 1000)
7305 			continue;
7306 
7307 		num = (unsigned long long)m->clock * 1000 * 1000;
7308 		den = common_rates[i] * (unsigned long long)m->htotal;
7309 		target_vtotal = div_u64(num, den);
7310 		target_vtotal_diff = target_vtotal - m->vtotal;
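		/*
		 * Worked example (hypothetical 1920x1080@60 mode): with
		 * clock = 148500 kHz, htotal = 2200 and vtotal = 1125, a
		 * 48 Hz target gives target_vtotal =
		 * 148500000000 / (48000 * 2200) = 1406, i.e. a
		 * target_vtotal_diff of 281 extra lines of front porch.
		 */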
7311 
7312 		/* Check for illegal modes */
7313 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7314 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7315 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7316 			continue;
7317 
7318 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7319 		if (!new_mode)
7320 			goto out;
7321 
7322 		new_mode->vtotal += (u16)target_vtotal_diff;
7323 		new_mode->vsync_start += (u16)target_vtotal_diff;
7324 		new_mode->vsync_end += (u16)target_vtotal_diff;
7325 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7326 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7327 
7328 		if (!is_duplicate_mode(aconnector, new_mode)) {
7329 			drm_mode_probed_add(&aconnector->base, new_mode);
7330 			new_modes_count += 1;
7331 		} else {
7332 			drm_mode_destroy(aconnector->base.dev, new_mode);
		}
7333 	}
7334  out:
7335 	return new_modes_count;
7336 }
7337 
7338 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7339 						   struct edid *edid)
7340 {
7341 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7342 		to_amdgpu_dm_connector(connector);
7343 
7344 	if (!(amdgpu_freesync_vid_mode && edid))
7345 		return;
7346 
7347 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7348 		amdgpu_dm_connector->num_modes +=
7349 			add_fs_modes(amdgpu_dm_connector);
7350 }
7351 
7352 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7353 {
7354 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7355 			to_amdgpu_dm_connector(connector);
7356 	struct drm_encoder *encoder;
7357 	struct edid *edid = amdgpu_dm_connector->edid;
7358 
7359 	encoder = amdgpu_dm_connector_to_encoder(connector);
7360 
7361 	if (!drm_edid_is_valid(edid)) {
7362 		amdgpu_dm_connector->num_modes =
7363 				drm_add_modes_noedid(connector, 640, 480);
7364 	} else {
7365 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7366 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7367 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7368 	}
7369 	amdgpu_dm_fbc_init(connector);
7370 
7371 	return amdgpu_dm_connector->num_modes;
7372 }
7373 
7374 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7375 				     struct amdgpu_dm_connector *aconnector,
7376 				     int connector_type,
7377 				     struct dc_link *link,
7378 				     int link_index)
7379 {
7380 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7381 
7382 	/*
7383 	 * Some of the properties below require access to state, like bpc.
7384 	 * Allocate some default initial connector state with our reset helper.
7385 	 */
7386 	if (aconnector->base.funcs->reset)
7387 		aconnector->base.funcs->reset(&aconnector->base);
7388 
7389 	aconnector->connector_id = link_index;
7390 	aconnector->dc_link = link;
7391 	aconnector->base.interlace_allowed = false;
7392 	aconnector->base.doublescan_allowed = false;
7393 	aconnector->base.stereo_allowed = false;
7394 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7395 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7396 	aconnector->audio_inst = -1;
7397 	mutex_init(&aconnector->hpd_lock);
7398 
7399 	/*
7400 	 * Configure HPD hot-plug support: connector->polled defaults to 0,
7401 	 * which means hot plug via HPD is not supported.
7402 	 */
7403 	switch (connector_type) {
7404 	case DRM_MODE_CONNECTOR_HDMIA:
7405 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7406 		aconnector->base.ycbcr_420_allowed =
7407 			link->link_enc->features.hdmi_ycbcr420_supported;
7408 		break;
7409 	case DRM_MODE_CONNECTOR_DisplayPort:
7410 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7411 		aconnector->base.ycbcr_420_allowed =
7412 			link->link_enc->features.dp_ycbcr420_supported;
7413 		break;
7414 	case DRM_MODE_CONNECTOR_DVID:
7415 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7416 		break;
7417 	default:
7418 		break;
7419 	}
7420 
7421 	drm_object_attach_property(&aconnector->base.base,
7422 				dm->ddev->mode_config.scaling_mode_property,
7423 				DRM_MODE_SCALE_NONE);
7424 
7425 	drm_object_attach_property(&aconnector->base.base,
7426 				adev->mode_info.underscan_property,
7427 				UNDERSCAN_OFF);
7428 	drm_object_attach_property(&aconnector->base.base,
7429 				adev->mode_info.underscan_hborder_property,
7430 				0);
7431 	drm_object_attach_property(&aconnector->base.base,
7432 				adev->mode_info.underscan_vborder_property,
7433 				0);
7434 
7435 	if (!aconnector->mst_port)
7436 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7437 
7438 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7439 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7440 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7441 
7442 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7443 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7444 		drm_object_attach_property(&aconnector->base.base,
7445 				adev->mode_info.abm_level_property, 0);
7446 	}
7447 
7448 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7449 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7450 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7451 		drm_object_attach_property(
7452 			&aconnector->base.base,
7453 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7454 
7455 		if (!aconnector->mst_port)
7456 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7457 
7458 #ifdef CONFIG_DRM_AMD_DC_HDCP
7459 		if (adev->dm.hdcp_workqueue)
7460 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7461 #endif
7462 	}
7463 }
7464 
7465 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7466 			      struct i2c_msg *msgs, int num)
7467 {
7468 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7469 	struct ddc_service *ddc_service = i2c->ddc_service;
7470 	struct i2c_command cmd;
7471 	int i;
7472 	int result = -EIO;
7473 
7474 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7475 
7476 	if (!cmd.payloads)
7477 		return result;
7478 
7479 	cmd.number_of_payloads = num;
7480 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7481 	cmd.speed = 100;
7482 
7483 	for (i = 0; i < num; i++) {
7484 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7485 		cmd.payloads[i].address = msgs[i].addr;
7486 		cmd.payloads[i].length = msgs[i].len;
7487 		cmd.payloads[i].data = msgs[i].buf;
7488 	}
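	/*
	 * For example, a typical EDID read arrives here as two messages
	 * (a one-byte offset write to address 0x50 followed by a 128-byte
	 * read) and becomes one write payload and one read payload within
	 * the same submitted command.
	 */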
7489 
7490 	if (dc_submit_i2c(
7491 			ddc_service->ctx->dc,
7492 			ddc_service->ddc_pin->hw_info.ddc_channel,
7493 			&cmd))
7494 		result = num;
7495 
7496 	kfree(cmd.payloads);
7497 	return result;
7498 }
7499 
7500 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7501 {
7502 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7503 }
7504 
7505 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7506 	.master_xfer = amdgpu_dm_i2c_xfer,
7507 	.functionality = amdgpu_dm_i2c_func,
7508 };
7509 
7510 static struct amdgpu_i2c_adapter *
7511 create_i2c(struct ddc_service *ddc_service,
7512 	   int link_index,
7513 	   int *res)
7514 {
7515 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7516 	struct amdgpu_i2c_adapter *i2c;
7517 
7518 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7519 	if (!i2c)
7520 		return NULL;
7521 	i2c->base.owner = THIS_MODULE;
7522 	i2c->base.class = I2C_CLASS_DDC;
7523 	i2c->base.dev.parent = &adev->pdev->dev;
7524 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7525 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7526 	i2c_set_adapdata(&i2c->base, i2c);
7527 	i2c->ddc_service = ddc_service;
7528 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7529 
7530 	return i2c;
7531 }
7532 
7533 
7534 /*
7535  * Note: this function assumes that dc_link_detect() was called for the
7536  * dc_link which will be represented by this aconnector.
7537  */
7538 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7539 				    struct amdgpu_dm_connector *aconnector,
7540 				    uint32_t link_index,
7541 				    struct amdgpu_encoder *aencoder)
7542 {
7543 	int res = 0;
7544 	int connector_type;
7545 	struct dc *dc = dm->dc;
7546 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7547 	struct amdgpu_i2c_adapter *i2c;
7548 
7549 	link->priv = aconnector;
7550 
7551 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7552 
7553 	i2c = create_i2c(link->ddc, link->link_index, &res);
7554 	if (!i2c) {
7555 		DRM_ERROR("Failed to create i2c adapter data\n");
7556 		return -ENOMEM;
7557 	}
7558 
7559 	aconnector->i2c = i2c;
7560 	res = i2c_add_adapter(&i2c->base);
7561 
7562 	if (res) {
7563 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7564 		goto out_free;
7565 	}
7566 
7567 	connector_type = to_drm_connector_type(link->connector_signal);
7568 
7569 	res = drm_connector_init_with_ddc(
7570 			dm->ddev,
7571 			&aconnector->base,
7572 			&amdgpu_dm_connector_funcs,
7573 			connector_type,
7574 			&i2c->base);
7575 
7576 	if (res) {
7577 		DRM_ERROR("connector_init failed\n");
7578 		aconnector->connector_id = -1;
7579 		goto out_free;
7580 	}
7581 
7582 	drm_connector_helper_add(
7583 			&aconnector->base,
7584 			&amdgpu_dm_connector_helper_funcs);
7585 
7586 	amdgpu_dm_connector_init_helper(
7587 		dm,
7588 		aconnector,
7589 		connector_type,
7590 		link,
7591 		link_index);
7592 
7593 	drm_connector_attach_encoder(
7594 		&aconnector->base, &aencoder->base);
7595 
7596 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7597 	    connector_type == DRM_MODE_CONNECTOR_eDP)
7598 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7599 
7600 out_free:
7601 	if (res) {
7602 		kfree(i2c);
7603 		aconnector->i2c = NULL;
7604 	}
7605 	return res;
7606 }
7607 
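/*
 * Each set bit i in the returned mask marks CRTC i as usable by the
 * encoder: e.g. four CRTCs yield 0xf (CRTCs 0-3), and any count not
 * listed below falls back to the six-CRTC mask 0x3f.
 */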
7608 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7609 {
7610 	switch (adev->mode_info.num_crtc) {
7611 	case 1:
7612 		return 0x1;
7613 	case 2:
7614 		return 0x3;
7615 	case 3:
7616 		return 0x7;
7617 	case 4:
7618 		return 0xf;
7619 	case 5:
7620 		return 0x1f;
7621 	case 6:
7622 	default:
7623 		return 0x3f;
7624 	}
7625 }
7626 
7627 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7628 				  struct amdgpu_encoder *aencoder,
7629 				  uint32_t link_index)
7630 {
7631 	struct amdgpu_device *adev = drm_to_adev(dev);
7632 
7633 	int res = drm_encoder_init(dev,
7634 				   &aencoder->base,
7635 				   &amdgpu_dm_encoder_funcs,
7636 				   DRM_MODE_ENCODER_TMDS,
7637 				   NULL);
7638 
7639 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7640 
7641 	if (!res)
7642 		aencoder->encoder_id = link_index;
7643 	else
7644 		aencoder->encoder_id = -1;
7645 
7646 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7647 
7648 	return res;
7649 }
7650 
7651 static void manage_dm_interrupts(struct amdgpu_device *adev,
7652 				 struct amdgpu_crtc *acrtc,
7653 				 bool enable)
7654 {
7655 	/*
7656 	 * We have no guarantee that the frontend index maps to the same
7657 	 * backend index - some even map to more than one.
7658 	 *
7659 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7660 	 */
7661 	int irq_type =
7662 		amdgpu_display_crtc_idx_to_irq_type(
7663 			adev,
7664 			acrtc->crtc_id);
7665 
7666 	if (enable) {
7667 		drm_crtc_vblank_on(&acrtc->base);
7668 		amdgpu_irq_get(
7669 			adev,
7670 			&adev->pageflip_irq,
7671 			irq_type);
7672 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7673 		amdgpu_irq_get(
7674 			adev,
7675 			&adev->vline0_irq,
7676 			irq_type);
7677 #endif
7678 	} else {
7679 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7680 		amdgpu_irq_put(
7681 			adev,
7682 			&adev->vline0_irq,
7683 			irq_type);
7684 #endif
7685 		amdgpu_irq_put(
7686 			adev,
7687 			&adev->pageflip_irq,
7688 			irq_type);
7689 		drm_crtc_vblank_off(&acrtc->base);
7690 	}
7691 }
7692 
7693 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7694 				      struct amdgpu_crtc *acrtc)
7695 {
7696 	int irq_type =
7697 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7698 
7699 	/*
7700 	 * Read the current IRQ state and force a reapplication of the
7701 	 * setting to hardware.
7702 	 */
7703 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7704 }
7705 
7706 static bool
7707 is_scaling_state_different(const struct dm_connector_state *dm_state,
7708 			   const struct dm_connector_state *old_dm_state)
7709 {
7710 	if (dm_state->scaling != old_dm_state->scaling)
7711 		return true;
7712 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7713 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7714 			return true;
7715 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7716 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7717 			return true;
7718 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7719 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7720 		return true;
7721 	return false;
7722 }
7723 
7724 #ifdef CONFIG_DRM_AMD_DC_HDCP
7725 static bool is_content_protection_different(struct drm_connector_state *state,
7726 					    const struct drm_connector_state *old_state,
7727 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7728 {
7729 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7730 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7731 
7732 	/* Handle: Type0/1 change */
7733 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7734 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7735 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7736 		return true;
7737 	}
7738 
7739 	/* CP is being re-enabled, ignore this.
7740 	 *
7741 	 * Handles:	ENABLED -> DESIRED
7742 	 */
7743 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7744 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7745 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7746 		return false;
7747 	}
7748 
7749 	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7750 	 *
7751 	 * Handles:	UNDESIRED -> ENABLED
7752 	 */
7753 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7754 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7755 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7756 
7757 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
7758 	 * hot-plug, headless s3, dpms
7759 	 *
7760 	 * Handles:	DESIRED -> DESIRED (Special case)
7761 	 */
7762 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7763 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7764 		dm_con_state->update_hdcp = false;
7765 		return true;
7766 	}
7767 
7768 	/*
7769 	 * Handles:	UNDESIRED -> UNDESIRED
7770 	 *		DESIRED -> DESIRED
7771 	 *		ENABLED -> ENABLED
7772 	 */
7773 	if (old_state->content_protection == state->content_protection)
7774 		return false;
7775 
7776 	/*
7777 	 * Handles:	UNDESIRED -> DESIRED
7778 	 *		DESIRED -> UNDESIRED
7779 	 *		ENABLED -> UNDESIRED
7780 	 */
7781 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7782 		return true;
7783 
7784 	/*
7785 	 * Handles:	DESIRED -> ENABLED
7786 	 */
7787 	return false;
7788 }
7789 
7790 #endif
7791 static void remove_stream(struct amdgpu_device *adev,
7792 			  struct amdgpu_crtc *acrtc,
7793 			  struct dc_stream_state *stream)
7794 {
7795 	/* This is the update-mode case: just mark the CRTC as disabled here. */
7796 
7797 	acrtc->otg_inst = -1;
7798 	acrtc->enabled = false;
7799 }
7800 
7801 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7802 			       struct dc_cursor_position *position)
7803 {
7804 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7805 	int x, y;
7806 	int xorigin = 0, yorigin = 0;
7807 
7808 	if (!crtc || !plane->state->fb)
7809 		return 0;
7810 
7811 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7812 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7813 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7814 			  __func__,
7815 			  plane->state->crtc_w,
7816 			  plane->state->crtc_h);
7817 		return -EINVAL;
7818 	}
7819 
7820 	x = plane->state->crtc_x;
7821 	y = plane->state->crtc_y;
7822 
7823 	if (x <= -amdgpu_crtc->max_cursor_width ||
7824 	    y <= -amdgpu_crtc->max_cursor_height)
7825 		return 0;
7826 
7827 	if (x < 0) {
7828 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7829 		x = 0;
7830 	}
7831 	if (y < 0) {
7832 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7833 		y = 0;
7834 	}
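	/*
	 * Worked example (hypothetical): a 64x64 cursor at x = -20 is
	 * clamped to x = 0 with x_hotspot = 20, so the hardware shifts the
	 * cursor image left and it still appears to straddle the screen
	 * edge.
	 */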
7835 	position->enable = true;
7836 	position->translate_by_source = true;
7837 	position->x = x;
7838 	position->y = y;
7839 	position->x_hotspot = xorigin;
7840 	position->y_hotspot = yorigin;
7841 
7842 	return 0;
7843 }
7844 
7845 static void handle_cursor_update(struct drm_plane *plane,
7846 				 struct drm_plane_state *old_plane_state)
7847 {
7848 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7849 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7850 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7851 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7852 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7853 	uint64_t address = afb ? afb->address : 0;
7854 	struct dc_cursor_position position = {0};
7855 	struct dc_cursor_attributes attributes;
7856 	int ret;
7857 
7858 	if (!plane->state->fb && !old_plane_state->fb)
7859 		return;
7860 
7861 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7862 			 __func__,
7863 			 amdgpu_crtc->crtc_id,
7864 			 plane->state->crtc_w,
7865 			 plane->state->crtc_h);
7866 
7867 	ret = get_cursor_position(plane, crtc, &position);
7868 	if (ret)
7869 		return;
7870 
7871 	if (!position.enable) {
7872 		/* turn off cursor */
7873 		if (crtc_state && crtc_state->stream) {
7874 			mutex_lock(&adev->dm.dc_lock);
7875 			dc_stream_set_cursor_position(crtc_state->stream,
7876 						      &position);
7877 			mutex_unlock(&adev->dm.dc_lock);
7878 		}
7879 		return;
7880 	}
7881 
7882 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7883 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7884 
7885 	memset(&attributes, 0, sizeof(attributes));
7886 	attributes.address.high_part = upper_32_bits(address);
7887 	attributes.address.low_part  = lower_32_bits(address);
7888 	attributes.width             = plane->state->crtc_w;
7889 	attributes.height            = plane->state->crtc_h;
7890 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7891 	attributes.rotation_angle    = 0;
7892 	attributes.attribute_flags.value = 0;
7893 
7894 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7895 
7896 	if (crtc_state->stream) {
7897 		mutex_lock(&adev->dm.dc_lock);
7898 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7899 							 &attributes))
7900 			DRM_ERROR("DC failed to set cursor attributes\n");
7901 
7902 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7903 						   &position))
7904 			DRM_ERROR("DC failed to set cursor position\n");
7905 		mutex_unlock(&adev->dm.dc_lock);
7906 	}
7907 }
7908 
7909 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7910 {
7912 	assert_spin_locked(&acrtc->base.dev->event_lock);
7913 	WARN_ON(acrtc->event);
7914 
7915 	acrtc->event = acrtc->base.state->event;
7916 
7917 	/* Set the flip status */
7918 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7919 
7920 	/* Mark this event as consumed */
7921 	acrtc->base.state->event = NULL;
7922 
7923 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7924 						 acrtc->crtc_id);
7925 }
7926 
7927 static void update_freesync_state_on_stream(
7928 	struct amdgpu_display_manager *dm,
7929 	struct dm_crtc_state *new_crtc_state,
7930 	struct dc_stream_state *new_stream,
7931 	struct dc_plane_state *surface,
7932 	u32 flip_timestamp_in_us)
7933 {
7934 	struct mod_vrr_params vrr_params;
7935 	struct dc_info_packet vrr_infopacket = {0};
7936 	struct amdgpu_device *adev = dm->adev;
7937 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7938 	unsigned long flags;
7939 	bool pack_sdp_v1_3 = false;
7940 
7941 	if (!new_stream)
7942 		return;
7943 
7944 	/*
7945 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7946 	 * For now it's sufficient to just guard against these conditions.
7947 	 */
7948 
7949 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7950 		return;
7951 
7952 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7953 	vrr_params = acrtc->dm_irq_params.vrr_params;
7954 
7955 	if (surface) {
7956 		mod_freesync_handle_preflip(
7957 			dm->freesync_module,
7958 			surface,
7959 			new_stream,
7960 			flip_timestamp_in_us,
7961 			&vrr_params);
7962 
7963 		if (adev->family < AMDGPU_FAMILY_AI &&
7964 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7965 			mod_freesync_handle_v_update(dm->freesync_module,
7966 						     new_stream, &vrr_params);
7967 
7968 			/* Need to call this before the frame ends. */
7969 			dc_stream_adjust_vmin_vmax(dm->dc,
7970 						   new_crtc_state->stream,
7971 						   &vrr_params.adjust);
7972 		}
7973 	}
7974 
7975 	mod_freesync_build_vrr_infopacket(
7976 		dm->freesync_module,
7977 		new_stream,
7978 		&vrr_params,
7979 		PACKET_TYPE_VRR,
7980 		TRANSFER_FUNC_UNKNOWN,
7981 		&vrr_infopacket,
7982 		pack_sdp_v1_3);
7983 
7984 	new_crtc_state->freesync_timing_changed |=
7985 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7986 			&vrr_params.adjust,
7987 			sizeof(vrr_params.adjust)) != 0);
7988 
7989 	new_crtc_state->freesync_vrr_info_changed |=
7990 		(memcmp(&new_crtc_state->vrr_infopacket,
7991 			&vrr_infopacket,
7992 			sizeof(vrr_infopacket)) != 0);
7993 
7994 	acrtc->dm_irq_params.vrr_params = vrr_params;
7995 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7996 
7997 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7998 	new_stream->vrr_infopacket = vrr_infopacket;
7999 
8000 	if (new_crtc_state->freesync_vrr_info_changed)
8001 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8002 			      new_crtc_state->base.crtc->base.id,
8003 			      (int)new_crtc_state->base.vrr_enabled,
8004 			      (int)vrr_params.state);
8005 
8006 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8007 }
8008 
8009 static void update_stream_irq_parameters(
8010 	struct amdgpu_display_manager *dm,
8011 	struct dm_crtc_state *new_crtc_state)
8012 {
8013 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8014 	struct mod_vrr_params vrr_params;
8015 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8016 	struct amdgpu_device *adev = dm->adev;
8017 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8018 	unsigned long flags;
8019 
8020 	if (!new_stream)
8021 		return;
8022 
8023 	/*
8024 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8025 	 * For now it's sufficient to just guard against these conditions.
8026 	 */
8027 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8028 		return;
8029 
8030 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8031 	vrr_params = acrtc->dm_irq_params.vrr_params;
8032 
8033 	if (new_crtc_state->vrr_supported &&
8034 	    config.min_refresh_in_uhz &&
8035 	    config.max_refresh_in_uhz) {
8036 		/*
8037 		 * if freesync compatible mode was set, config.state will be set
8038 		 * in atomic check
8039 		 */
8040 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8041 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8042 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8043 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8044 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8045 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8046 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8047 		} else {
8048 			config.state = new_crtc_state->base.vrr_enabled ?
8049 						     VRR_STATE_ACTIVE_VARIABLE :
8050 						     VRR_STATE_INACTIVE;
8051 		}
8052 	} else {
8053 		config.state = VRR_STATE_UNSUPPORTED;
8054 	}
8055 
8056 	mod_freesync_build_vrr_params(dm->freesync_module,
8057 				      new_stream,
8058 				      &config, &vrr_params);
8059 
8060 	new_crtc_state->freesync_timing_changed |=
8061 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8062 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8063 
8064 	new_crtc_state->freesync_config = config;
8065 	/* Copy state for access from DM IRQ handler */
8066 	acrtc->dm_irq_params.freesync_config = config;
8067 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8068 	acrtc->dm_irq_params.vrr_params = vrr_params;
8069 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8070 }
8071 
8072 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8073 					    struct dm_crtc_state *new_state)
8074 {
8075 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8076 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8077 
8078 	if (!old_vrr_active && new_vrr_active) {
8079 		/* Transition VRR inactive -> active:
8080 		 * While VRR is active, we must not disable vblank irq, as a
8081 		 * re-enable after a disable would compute bogus vblank/pflip
8082 		 * timestamps if it happened inside the display front-porch.
8083 		 *
8084 		 * We also need vupdate irq for the actual core vblank handling
8085 		 * at end of vblank.
8086 		 */
8087 		dm_set_vupdate_irq(new_state->base.crtc, true);
8088 		drm_crtc_vblank_get(new_state->base.crtc);
8089 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8090 				 __func__, new_state->base.crtc->base.id);
8091 	} else if (old_vrr_active && !new_vrr_active) {
8092 		/* Transition VRR active -> inactive:
8093 		 * Allow vblank irq disable again for fixed refresh rate.
8094 		 */
8095 		dm_set_vupdate_irq(new_state->base.crtc, false);
8096 		drm_crtc_vblank_put(new_state->base.crtc);
8097 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8098 				 __func__, new_state->base.crtc->base.id);
8099 	}
8100 }
8101 
8102 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8103 {
8104 	struct drm_plane *plane;
8105 	struct drm_plane_state *old_plane_state, *new_plane_state;
8106 	int i;
8107 
8108 	/*
8109 	 * TODO: Make this per-stream so we don't issue redundant updates for
8110 	 * commits with multiple streams.
8111 	 */
8112 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8113 				       new_plane_state, i)
8114 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8115 			handle_cursor_update(plane, old_plane_state);
8116 }
8117 
8118 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8119 				    struct dc_state *dc_state,
8120 				    struct drm_device *dev,
8121 				    struct amdgpu_display_manager *dm,
8122 				    struct drm_crtc *pcrtc,
8123 				    bool wait_for_vblank)
8124 {
8125 	uint32_t i;
8126 	uint64_t timestamp_ns;
8127 	struct drm_plane *plane;
8128 	struct drm_plane_state *old_plane_state, *new_plane_state;
8129 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8130 	struct drm_crtc_state *new_pcrtc_state =
8131 			drm_atomic_get_new_crtc_state(state, pcrtc);
8132 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8133 	struct dm_crtc_state *dm_old_crtc_state =
8134 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8135 	int planes_count = 0, vpos, hpos;
8136 	long r;
8137 	unsigned long flags;
8138 	struct amdgpu_bo *abo;
8139 	uint32_t target_vblank, last_flip_vblank;
8140 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8141 	bool pflip_present = false;
8142 	struct {
8143 		struct dc_surface_update surface_updates[MAX_SURFACES];
8144 		struct dc_plane_info plane_infos[MAX_SURFACES];
8145 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8146 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8147 		struct dc_stream_update stream_update;
8148 	} *bundle;
8149 
8150 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8151 
8152 	if (!bundle) {
8153 		dm_error("Failed to allocate update bundle\n");
8154 		goto cleanup;
8155 	}
8156 
8157 	/*
8158 	 * Disable the cursor first if we're disabling all the planes.
8159 	 * It'll remain on the screen after the planes are re-enabled
8160 	 * if we don't.
8161 	 */
8162 	if (acrtc_state->active_planes == 0)
8163 		amdgpu_dm_commit_cursors(state);
8164 
8165 	/* update planes when needed */
8166 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8167 		struct drm_crtc *crtc = new_plane_state->crtc;
8168 		struct drm_crtc_state *new_crtc_state;
8169 		struct drm_framebuffer *fb = new_plane_state->fb;
8170 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8171 		bool plane_needs_flip;
8172 		struct dc_plane_state *dc_plane;
8173 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8174 
8175 		/* Cursor plane is handled after stream updates */
8176 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8177 			continue;
8178 
8179 		if (!fb || !crtc || pcrtc != crtc)
8180 			continue;
8181 
8182 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8183 		if (!new_crtc_state->active)
8184 			continue;
8185 
8186 		dc_plane = dm_new_plane_state->dc_state;
8187 
8188 		bundle->surface_updates[planes_count].surface = dc_plane;
8189 		if (new_pcrtc_state->color_mgmt_changed) {
8190 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8191 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8192 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8193 		}
8194 
8195 		fill_dc_scaling_info(new_plane_state,
8196 				     &bundle->scaling_infos[planes_count]);
8197 
8198 		bundle->surface_updates[planes_count].scaling_info =
8199 			&bundle->scaling_infos[planes_count];
8200 
8201 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8202 
8203 		pflip_present = pflip_present || plane_needs_flip;
8204 
8205 		if (!plane_needs_flip) {
8206 			planes_count += 1;
8207 			continue;
8208 		}
8209 
8210 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8211 
8212 		/*
8213 		 * Wait for all fences on this FB. Do limited wait to avoid
8214 		 * deadlock during GPU reset when this fence will not signal
8215 		 * but we hold reservation lock for the BO.
8216 		 */
8217 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8218 							false,
8219 							msecs_to_jiffies(5000));
8220 		if (unlikely(r <= 0))
8221 			DRM_ERROR("Waiting for fences timed out!\n");
8222 
8223 		fill_dc_plane_info_and_addr(
8224 			dm->adev, new_plane_state,
8225 			afb->tiling_flags,
8226 			&bundle->plane_infos[planes_count],
8227 			&bundle->flip_addrs[planes_count].address,
8228 			afb->tmz_surface, false);
8229 
8230 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
8231 				 new_plane_state->plane->index,
8232 				 bundle->plane_infos[planes_count].dcc.enable);
8233 
8234 		bundle->surface_updates[planes_count].plane_info =
8235 			&bundle->plane_infos[planes_count];
8236 
8237 		/*
8238 		 * Only allow immediate flips for fast updates that don't
8239 		 * change FB pitch, DCC state, rotation or mirroring.
8240 		 */
8241 		bundle->flip_addrs[planes_count].flip_immediate =
8242 			crtc->state->async_flip &&
8243 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8244 
8245 		timestamp_ns = ktime_get_ns();
8246 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8247 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8248 		bundle->surface_updates[planes_count].surface = dc_plane;
8249 
8250 		if (!bundle->surface_updates[planes_count].surface) {
8251 			DRM_ERROR("No surface for CRTC: id=%d\n",
8252 					acrtc_attach->crtc_id);
8253 			continue;
8254 		}
8255 
8256 		if (plane == pcrtc->primary)
8257 			update_freesync_state_on_stream(
8258 				dm,
8259 				acrtc_state,
8260 				acrtc_state->stream,
8261 				dc_plane,
8262 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8263 
8264 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
8265 				 __func__,
8266 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8267 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8268 
8269 		planes_count += 1;
8270 
8271 	}
8272 
8273 	if (pflip_present) {
8274 		if (!vrr_active) {
8275 			/* Use old throttling in non-vrr fixed refresh rate mode
8276 			 * to keep flip scheduling based on target vblank counts
8277 			 * working in a backwards compatible way, e.g., for
8278 			 * clients using the GLX_OML_sync_control extension or
8279 			 * DRI3/Present extension with defined target_msc.
8280 			 */
8281 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8282 		} else {
8284 			/* For variable refresh rate mode only:
8285 			 * Get vblank of last completed flip to avoid > 1 vrr
8286 			 * flips per video frame by use of throttling, but allow
8287 			 * flip programming anywhere in the possibly large
8288 			 * variable vrr vblank interval for fine-grained flip
8289 			 * timing control and more opportunity to avoid stutter
8290 			 * on late submission of flips.
8291 			 */
8292 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8293 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8294 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8295 		}
8296 
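		/*
		 * wait_for_vblank is a bool, so the target is either the
		 * vblank of the last completed flip or the one right after it.
		 */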
8297 		target_vblank = last_flip_vblank + wait_for_vblank;
8298 
8299 		/*
8300 		 * Wait until we're out of the vertical blank period before the one
8301 		 * targeted by the flip
8302 		 */
8303 		while ((acrtc_attach->enabled &&
8304 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8305 							    0, &vpos, &hpos, NULL,
8306 							    NULL, &pcrtc->hwmode)
8307 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8308 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8309 			(int)(target_vblank -
8310 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8311 			usleep_range(1000, 1100);
8312 		}
8313 
8314 		/*
8315 		 * Prepare the flip event for the pageflip interrupt to handle.
8316 		 *
8317 		 * This only works when we've already turned on the
8318 		 * appropriate hardware blocks (e.g. HUBP), so in the transition
8319 		 * from 0 -> n planes we have to skip a hardware-generated event
8320 		 * and rely on sending it from software.
8321 		 */
8322 		if (acrtc_attach->base.state->event &&
8323 		    acrtc_state->active_planes > 0) {
8324 			drm_crtc_vblank_get(pcrtc);
8325 
8326 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8327 
8328 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8329 			prepare_flip_isr(acrtc_attach);
8330 
8331 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8332 		}
8333 
8334 		if (acrtc_state->stream) {
8335 			if (acrtc_state->freesync_vrr_info_changed)
8336 				bundle->stream_update.vrr_infopacket =
8337 					&acrtc_state->stream->vrr_infopacket;
8338 		}
8339 	}
8340 
8341 	/* Update the planes if changed or disable if we don't have any. */
8342 	if ((planes_count || acrtc_state->active_planes == 0) &&
8343 		acrtc_state->stream) {
8344 		bundle->stream_update.stream = acrtc_state->stream;
8345 		if (new_pcrtc_state->mode_changed) {
8346 			bundle->stream_update.src = acrtc_state->stream->src;
8347 			bundle->stream_update.dst = acrtc_state->stream->dst;
8348 		}
8349 
8350 		if (new_pcrtc_state->color_mgmt_changed) {
8351 			/*
8352 			 * TODO: This isn't fully correct since we've actually
8353 			 * already modified the stream in place.
8354 			 */
8355 			bundle->stream_update.gamut_remap =
8356 				&acrtc_state->stream->gamut_remap_matrix;
8357 			bundle->stream_update.output_csc_transform =
8358 				&acrtc_state->stream->csc_color_matrix;
8359 			bundle->stream_update.out_transfer_func =
8360 				acrtc_state->stream->out_transfer_func;
8361 		}
8362 
8363 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8364 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8365 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8366 
8367 		/*
8368 		 * If FreeSync state on the stream has changed then we need to
8369 		 * re-adjust the min/max bounds now that DC doesn't handle this
8370 		 * as part of commit.
8371 		 */
8372 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8373 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8374 			dc_stream_adjust_vmin_vmax(
8375 				dm->dc, acrtc_state->stream,
8376 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8377 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8378 		}
8379 		mutex_lock(&dm->dc_lock);
8380 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8381 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8382 			amdgpu_dm_psr_disable(acrtc_state->stream);
8383 
8384 		dc_commit_updates_for_stream(dm->dc,
8385 						     bundle->surface_updates,
8386 						     planes_count,
8387 						     acrtc_state->stream,
8388 						     &bundle->stream_update,
8389 						     dc_state);
8390 
8391 		/*
8392 		 * Enable or disable the interrupts on the backend.
8393 		 *
8394 		 * Most pipes are put into power gating when unused.
8395 		 *
8396 		 * When power gating is enabled on a pipe we lose the
8397 		 * interrupt enablement state when power gating is disabled.
8398 		 *
8399 		 * So we need to update the IRQ control state in hardware
8400 		 * whenever the pipe turns on (since it could be previously
8401 		 * power gated) or off (since some pipes can't be power gated
8402 		 * on some ASICs).
8403 		 */
8404 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8405 			dm_update_pflip_irq_state(drm_to_adev(dev),
8406 						  acrtc_attach);
8407 
8408 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8409 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8410 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8411 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8412 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8413 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8414 				!acrtc_state->stream->link->psr_settings.psr_allow_active)
8415 			amdgpu_dm_psr_enable(acrtc_state->stream);
8417 
8418 		mutex_unlock(&dm->dc_lock);
8419 	}
8420 
8421 	/*
8422 	 * Update cursor state *after* programming all the planes.
8423 	 * This avoids redundant programming in the case where we're going
8424 	 * to be disabling a single plane - those pipes are being disabled.
8425 	 */
8426 	if (acrtc_state->active_planes)
8427 		amdgpu_dm_commit_cursors(state);
8428 
8429 cleanup:
8430 	kfree(bundle);
8431 }
8432 
8433 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8434 				   struct drm_atomic_state *state)
8435 {
8436 	struct amdgpu_device *adev = drm_to_adev(dev);
8437 	struct amdgpu_dm_connector *aconnector;
8438 	struct drm_connector *connector;
8439 	struct drm_connector_state *old_con_state, *new_con_state;
8440 	struct drm_crtc_state *new_crtc_state;
8441 	struct dm_crtc_state *new_dm_crtc_state;
8442 	const struct dc_stream_status *status;
8443 	int i, inst;
8444 
	/* Notify audio device removals. */
8446 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8447 		if (old_con_state->crtc != new_con_state->crtc) {
8448 			/* CRTC changes require notification. */
8449 			goto notify;
8450 		}
8451 
8452 		if (!new_con_state->crtc)
8453 			continue;
8454 
8455 		new_crtc_state = drm_atomic_get_new_crtc_state(
8456 			state, new_con_state->crtc);
8457 
8458 		if (!new_crtc_state)
8459 			continue;
8460 
8461 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8462 			continue;
8463 
8464 	notify:
8465 		aconnector = to_amdgpu_dm_connector(connector);
8466 
8467 		mutex_lock(&adev->dm.audio_lock);
8468 		inst = aconnector->audio_inst;
8469 		aconnector->audio_inst = -1;
8470 		mutex_unlock(&adev->dm.audio_lock);
8471 
8472 		amdgpu_dm_audio_eld_notify(adev, inst);
8473 	}
8474 
8475 	/* Notify audio device additions. */
8476 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8477 		if (!new_con_state->crtc)
8478 			continue;
8479 
8480 		new_crtc_state = drm_atomic_get_new_crtc_state(
8481 			state, new_con_state->crtc);
8482 
8483 		if (!new_crtc_state)
8484 			continue;
8485 
8486 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8487 			continue;
8488 
8489 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8490 		if (!new_dm_crtc_state->stream)
8491 			continue;
8492 
8493 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8494 		if (!status)
8495 			continue;
8496 
8497 		aconnector = to_amdgpu_dm_connector(connector);
8498 
8499 		mutex_lock(&adev->dm.audio_lock);
8500 		inst = status->audio_inst;
8501 		aconnector->audio_inst = inst;
8502 		mutex_unlock(&adev->dm.audio_lock);
8503 
8504 		amdgpu_dm_audio_eld_notify(adev, inst);
8505 	}
8506 }
8507 
8508 /*
8509  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8510  * @crtc_state: the DRM CRTC state
8511  * @stream_state: the DC stream state.
8512  *
8513  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8514  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8515  */
8516 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8517 						struct dc_stream_state *stream_state)
8518 {
8519 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8520 }
8521 
8522 /**
8523  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8524  * @state: The atomic state to commit
8525  *
8526  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered out anything non-kosher.
8529  */
8530 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8531 {
8532 	struct drm_device *dev = state->dev;
8533 	struct amdgpu_device *adev = drm_to_adev(dev);
8534 	struct amdgpu_display_manager *dm = &adev->dm;
8535 	struct dm_atomic_state *dm_state;
8536 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8537 	uint32_t i, j;
8538 	struct drm_crtc *crtc;
8539 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8540 	unsigned long flags;
8541 	bool wait_for_vblank = true;
8542 	struct drm_connector *connector;
8543 	struct drm_connector_state *old_con_state, *new_con_state;
8544 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8545 	int crtc_disable_count = 0;
8546 	bool mode_set_reset_required = false;
8547 
8548 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8549 
8550 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8551 
8552 	dm_state = dm_atomic_get_new_state(state);
8553 	if (dm_state && dm_state->context) {
8554 		dc_state = dm_state->context;
8555 	} else {
8556 		/* No state changes, retain current state. */
8557 		dc_state_temp = dc_create_state(dm->dc);
8558 		ASSERT(dc_state_temp);
8559 		dc_state = dc_state_temp;
8560 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8561 	}
8562 
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
8565 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8566 
8567 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8568 
8569 		if (old_crtc_state->active &&
8570 		    (!new_crtc_state->active ||
8571 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8572 			manage_dm_interrupts(adev, acrtc, false);
8573 			dc_stream_release(dm_old_crtc_state->stream);
8574 		}
8575 	}
8576 
8577 	drm_atomic_helper_calc_timestamping_constants(state);
8578 
8579 	/* update changed items */
8580 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8581 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8582 
8583 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8584 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8585 
		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
8590 			acrtc->crtc_id,
8591 			new_crtc_state->enable,
8592 			new_crtc_state->active,
8593 			new_crtc_state->planes_changed,
8594 			new_crtc_state->mode_changed,
8595 			new_crtc_state->active_changed,
8596 			new_crtc_state->connectors_changed);
8597 
		/* Disable cursor if the crtc is being disabled */
8599 		if (old_crtc_state->active && !new_crtc_state->active) {
8600 			struct dc_cursor_position position;
8601 
8602 			memset(&position, 0, sizeof(position));
8603 			mutex_lock(&dm->dc_lock);
8604 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8605 			mutex_unlock(&dm->dc_lock);
8606 		}
8607 
8608 		/* Copy all transient state flags into dc state */
8609 		if (dm_new_crtc_state->stream) {
8610 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8611 							    dm_new_crtc_state->stream);
8612 		}
8613 
		/*
		 * Handle the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
8617 
8618 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8619 
8620 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8621 
8622 			if (!dm_new_crtc_state->stream) {
8623 				/*
8624 				 * this could happen because of issues with
8625 				 * userspace notifications delivery.
8626 				 * In this case userspace tries to set mode on
8627 				 * display which is disconnected in fact.
8628 				 * dc_sink is NULL in this case on aconnector.
8629 				 * We expect reset mode will come soon.
8630 				 *
8631 				 * This can also happen when unplug is done
8632 				 * during resume sequence ended
8633 				 *
8634 				 * In this case, we want to pretend we still
8635 				 * have a sink to keep the pipe running so that
8636 				 * hw state is consistent with the sw state
8637 				 */
8638 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8639 						__func__, acrtc->base.base.id);
8640 				continue;
8641 			}
8642 
8643 			if (dm_old_crtc_state->stream)
8644 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8645 
8646 			pm_runtime_get_noresume(dev->dev);
8647 
8648 			acrtc->enabled = true;
8649 			acrtc->hw_mode = new_crtc_state->mode;
8650 			crtc->hwmode = new_crtc_state->mode;
8651 			mode_set_reset_required = true;
8652 		} else if (modereset_required(new_crtc_state)) {
8653 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8654 			/* i.e. reset mode */
8655 			if (dm_old_crtc_state->stream)
8656 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8657 
8658 			mode_set_reset_required = true;
8659 		}
8660 	} /* for_each_crtc_in_state() */
8661 
8662 	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
8664 		if (mode_set_reset_required)
8665 			amdgpu_dm_psr_disable_all(dm);
8666 
8667 		dm_enable_per_frame_crtc_master_sync(dc_state);
8668 		mutex_lock(&dm->dc_lock);
8669 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8670 		mutex_unlock(&dm->dc_lock);
8671 	}
8672 
8673 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8674 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8675 
8676 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8677 
8678 		if (dm_new_crtc_state->stream != NULL) {
8679 			const struct dc_stream_status *status =
8680 					dc_stream_get_status(dm_new_crtc_state->stream);
8681 
8682 			if (!status)
8683 				status = dc_stream_get_status_from_state(dc_state,
8684 									 dm_new_crtc_state->stream);
8685 			if (!status)
8686 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8687 			else
8688 				acrtc->otg_inst = status->primary_otg_inst;
8689 		}
8690 	}
8691 #ifdef CONFIG_DRM_AMD_DC_HDCP
8692 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8693 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8694 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8695 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8696 
8697 		new_crtc_state = NULL;
8698 
8699 		if (acrtc)
8700 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8701 
8702 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8703 
8704 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8705 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8706 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8707 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8708 			dm_new_con_state->update_hdcp = true;
8709 			continue;
8710 		}
8711 
8712 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8713 			hdcp_update_display(
8714 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8715 				new_con_state->hdcp_content_type,
8716 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8717 	}
8718 #endif
8719 
8720 	/* Handle connector state changes */
8721 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8722 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8723 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8724 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8725 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8726 		struct dc_stream_update stream_update;
8727 		struct dc_info_packet hdr_packet;
8728 		struct dc_stream_status *status = NULL;
8729 		bool abm_changed, hdr_changed, scaling_changed;
8730 
8731 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8732 		memset(&stream_update, 0, sizeof(stream_update));
8733 
8734 		if (acrtc) {
8735 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8736 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8737 		}
8738 
8739 		/* Skip any modesets/resets */
8740 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8741 			continue;
8742 
8743 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8744 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8745 
8746 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8747 							     dm_old_con_state);
8748 
8749 		abm_changed = dm_new_crtc_state->abm_level !=
8750 			      dm_old_crtc_state->abm_level;
8751 
8752 		hdr_changed =
8753 			is_hdr_metadata_different(old_con_state, new_con_state);
8754 
8755 		if (!scaling_changed && !abm_changed && !hdr_changed)
8756 			continue;
8757 
8758 		stream_update.stream = dm_new_crtc_state->stream;
8759 		if (scaling_changed) {
8760 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8761 					dm_new_con_state, dm_new_crtc_state->stream);
8762 
8763 			stream_update.src = dm_new_crtc_state->stream->src;
8764 			stream_update.dst = dm_new_crtc_state->stream->dst;
8765 		}
8766 
8767 		if (abm_changed) {
8768 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8769 
8770 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8771 		}
8772 
8773 		if (hdr_changed) {
8774 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8775 			stream_update.hdr_static_metadata = &hdr_packet;
8776 		}
8777 
8778 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8779 		WARN_ON(!status);
8780 		WARN_ON(!status->plane_count);
8781 
8782 		/*
8783 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8784 		 * Here we create an empty update on each plane.
8785 		 * To fix this, DC should permit updating only stream properties.
8786 		 */
8787 		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

8791 		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
8798 		mutex_unlock(&dm->dc_lock);
8799 	}
8800 
8801 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8802 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8803 				      new_crtc_state, i) {
8804 		if (old_crtc_state->active && !new_crtc_state->active)
8805 			crtc_disable_count++;
8806 
8807 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8808 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8809 
		/* Update the freesync config on the crtc state and the parameters used by the irq handlers */
8811 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8812 
8813 		/* Handle vrr on->off / off->on transitions */
8814 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8815 						dm_new_crtc_state);
8816 	}
8817 
8818 	/**
8819 	 * Enable interrupts for CRTCs that are newly enabled or went through
8820 	 * a modeset. It was intentionally deferred until after the front end
8821 	 * state was modified to wait until the OTG was on and so the IRQ
8822 	 * handlers didn't access stale or invalid state.
8823 	 */
8824 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8825 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8826 #ifdef CONFIG_DEBUG_FS
8827 		bool configure_crc = false;
8828 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
8829 #endif
8830 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8831 
8832 		if (new_crtc_state->active &&
8833 		    (!old_crtc_state->active ||
8834 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8835 			dc_stream_retain(dm_new_crtc_state->stream);
8836 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8837 			manage_dm_interrupts(adev, acrtc, true);
8838 
8839 #ifdef CONFIG_DEBUG_FS
8840 			/**
8841 			 * Frontend may have changed so reapply the CRC capture
8842 			 * settings for the stream.
8843 			 */
8844 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8845 			spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8846 			cur_crc_src = acrtc->dm_irq_params.crc_src;
8847 			spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8848 
8849 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8850 				configure_crc = true;
8851 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8852 				if (amdgpu_dm_crc_window_is_activated(crtc))
8853 					configure_crc = false;
8854 #endif
8855 			}
8856 
8857 			if (configure_crc)
8858 				amdgpu_dm_crtc_configure_crc_source(
8859 					crtc, dm_new_crtc_state, cur_crc_src);
8860 #endif
8861 		}
8862 	}
8863 
8864 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8865 		if (new_crtc_state->async_flip)
8866 			wait_for_vblank = false;
8867 
	/* Update planes when needed, per CRTC */
8869 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8870 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8871 
8872 		if (dm_new_crtc_state->stream)
8873 			amdgpu_dm_commit_planes(state, dc_state, dev,
8874 						dm, crtc, wait_for_vblank);
8875 	}
8876 
8877 	/* Update audio instances for each connector. */
8878 	amdgpu_dm_commit_audio(dev, state);
8879 
8880 	/*
8881 	 * send vblank event on all events not handled in flip and
8882 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8883 	 */
8884 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8885 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8886 
8887 		if (new_crtc_state->event)
8888 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8889 
8890 		new_crtc_state->event = NULL;
8891 	}
8892 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8893 
8894 	/* Signal HW programming completion */
8895 	drm_atomic_helper_commit_hw_done(state);
8896 
8897 	if (wait_for_vblank)
8898 		drm_atomic_helper_wait_for_flip_done(dev, state);
8899 
8900 	drm_atomic_helper_cleanup_planes(dev, state);
8901 
	/* Give the stolen VGA memory back to VRAM */
8903 	if (!adev->mman.keep_stolen_vga_memory)
8904 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8905 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8906 
8907 	/*
8908 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8909 	 * so we can put the GPU into runtime suspend if we're not driving any
8910 	 * displays anymore
8911 	 */
8912 	for (i = 0; i < crtc_disable_count; i++)
8913 		pm_runtime_put_autosuspend(dev->dev);
8914 	pm_runtime_mark_last_busy(dev->dev);
8915 
8916 	if (dc_state_temp)
8917 		dc_release_state(dc_state_temp);
8918 }
8919 
8920 
8921 static int dm_force_atomic_commit(struct drm_connector *connector)
8922 {
8923 	int ret = 0;
8924 	struct drm_device *ddev = connector->dev;
8925 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8926 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8927 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8928 	struct drm_connector_state *conn_state;
8929 	struct drm_crtc_state *crtc_state;
8930 	struct drm_plane_state *plane_state;
8931 
8932 	if (!state)
8933 		return -ENOMEM;
8934 
8935 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8936 
	/* Construct an atomic state to restore the previous display settings */
8938 
8939 	/*
8940 	 * Attach connectors to drm_atomic_state
8941 	 */
8942 	conn_state = drm_atomic_get_connector_state(state, connector);
8943 
8944 	ret = PTR_ERR_OR_ZERO(conn_state);
8945 	if (ret)
8946 		goto out;
8947 
	/* Attach CRTC to drm_atomic_state */
8949 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8950 
8951 	ret = PTR_ERR_OR_ZERO(crtc_state);
8952 	if (ret)
8953 		goto out;
8954 
8955 	/* force a restore */
8956 	crtc_state->mode_changed = true;
8957 
8958 	/* Attach plane to drm_atomic_state */
8959 	plane_state = drm_atomic_get_plane_state(state, plane);
8960 
8961 	ret = PTR_ERR_OR_ZERO(plane_state);
8962 	if (ret)
8963 		goto out;
8964 
8965 	/* Call commit internally with the state we just constructed */
8966 	ret = drm_atomic_commit(state);
8967 
8968 out:
8969 	drm_atomic_state_put(state);
8970 	if (ret)
8971 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8972 
8973 	return ret;
8974 }
8975 
8976 /*
8977  * This function handles all cases when set mode does not come upon hotplug.
8978  * This includes when a display is unplugged then plugged back into the
8979  * same port and when running without usermode desktop manager supprot
8980  */
8981 void dm_restore_drm_connector_state(struct drm_device *dev,
8982 				    struct drm_connector *connector)
8983 {
8984 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8985 	struct amdgpu_crtc *disconnected_acrtc;
8986 	struct dm_crtc_state *acrtc_state;
8987 
8988 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8989 		return;
8990 
8991 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8992 	if (!disconnected_acrtc)
8993 		return;
8994 
8995 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8996 	if (!acrtc_state->stream)
8997 		return;
8998 
8999 	/*
9000 	 * If the previous sink is not released and different from the current,
9001 	 * we deduce we are in a state where we can not rely on usermode call
9002 	 * to turn on the display, so we do it here
9003 	 */
9004 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9005 		dm_force_atomic_commit(&aconnector->base);
9006 }
9007 
9008 /*
9009  * Grabs all modesetting locks to serialize against any blocking commits,
9010  * Waits for completion of all non blocking commits.
9011  */
9012 static int do_aquire_global_lock(struct drm_device *dev,
9013 				 struct drm_atomic_state *state)
9014 {
9015 	struct drm_crtc *crtc;
9016 	struct drm_crtc_commit *commit;
9017 	long ret;
9018 
9019 	/*
9020 	 * Adding all modeset locks to aquire_ctx will
9021 	 * ensure that when the framework release it the
9022 	 * extra locks we are locking here will get released to
9023 	 */
9024 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9025 	if (ret)
9026 		return ret;
9027 
9028 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9029 		spin_lock(&crtc->commit_lock);
9030 		commit = list_first_entry_or_null(&crtc->commit_list,
9031 				struct drm_crtc_commit, commit_entry);
9032 		if (commit)
9033 			drm_crtc_commit_get(commit);
9034 		spin_unlock(&crtc->commit_lock);
9035 
9036 		if (!commit)
9037 			continue;
9038 
9039 		/*
9040 		 * Make sure all pending HW programming completed and
9041 		 * page flips done
9042 		 */
9043 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9044 
9045 		if (ret > 0)
9046 			ret = wait_for_completion_interruptible_timeout(
9047 					&commit->flip_done, 10*HZ);
9048 
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
9052 
9053 		drm_crtc_commit_put(commit);
9054 	}
9055 
9056 	return ret < 0 ? ret : 0;
9057 }
9058 
9059 static void get_freesync_config_for_crtc(
9060 	struct dm_crtc_state *new_crtc_state,
9061 	struct dm_connector_state *new_con_state)
9062 {
9063 	struct mod_freesync_config config = {0};
9064 	struct amdgpu_dm_connector *aconnector =
9065 			to_amdgpu_dm_connector(new_con_state->base.connector);
9066 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9067 	int vrefresh = drm_mode_vrefresh(mode);
9068 	bool fs_vid_mode = false;
9069 
9070 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9071 					vrefresh >= aconnector->min_vfreq &&
9072 					vrefresh <= aconnector->max_vfreq;
9073 
9074 	if (new_crtc_state->vrr_supported) {
9075 		new_crtc_state->stream->ignore_msa_timing_param = true;
9076 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9077 
9078 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9079 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9080 		config.vsif_supported = true;
9081 		config.btr = true;
9082 
9083 		if (fs_vid_mode) {
9084 			config.state = VRR_STATE_ACTIVE_FIXED;
9085 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9086 			goto out;
9087 		} else if (new_crtc_state->base.vrr_enabled) {
9088 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9089 		} else {
9090 			config.state = VRR_STATE_INACTIVE;
9091 		}
9092 	}
9093 out:
9094 	new_crtc_state->freesync_config = config;
9095 }
9096 
9097 static void reset_freesync_config_for_crtc(
9098 	struct dm_crtc_state *new_crtc_state)
9099 {
9100 	new_crtc_state->vrr_supported = false;
9101 
9102 	memset(&new_crtc_state->vrr_infopacket, 0,
9103 	       sizeof(new_crtc_state->vrr_infopacket));
9104 }
9105 
9106 static bool
9107 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9108 				 struct drm_crtc_state *new_crtc_state)
9109 {
9110 	struct drm_display_mode old_mode, new_mode;
9111 
9112 	if (!old_crtc_state || !new_crtc_state)
9113 		return false;
9114 
9115 	old_mode = old_crtc_state->mode;
9116 	new_mode = new_crtc_state->mode;
9117 
9118 	if (old_mode.clock       == new_mode.clock &&
9119 	    old_mode.hdisplay    == new_mode.hdisplay &&
9120 	    old_mode.vdisplay    == new_mode.vdisplay &&
9121 	    old_mode.htotal      == new_mode.htotal &&
9122 	    old_mode.vtotal      != new_mode.vtotal &&
9123 	    old_mode.hsync_start == new_mode.hsync_start &&
9124 	    old_mode.vsync_start != new_mode.vsync_start &&
9125 	    old_mode.hsync_end   == new_mode.hsync_end &&
9126 	    old_mode.vsync_end   != new_mode.vsync_end &&
9127 	    old_mode.hskew       == new_mode.hskew &&
9128 	    old_mode.vscan       == new_mode.vscan &&
9129 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9130 	    (new_mode.vsync_end - new_mode.vsync_start))
9131 		return true;
9132 
9133 	return false;
9134 }
9135 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9137 	uint64_t num, den, res;
9138 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9139 
9140 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9141 
9142 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9143 	den = (unsigned long long)new_crtc_state->mode.htotal *
9144 	      (unsigned long long)new_crtc_state->mode.vtotal;
9145 
9146 	res = div_u64(num, den);
9147 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9148 }
9149 
9150 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9151 				struct drm_atomic_state *state,
9152 				struct drm_crtc *crtc,
9153 				struct drm_crtc_state *old_crtc_state,
9154 				struct drm_crtc_state *new_crtc_state,
9155 				bool enable,
9156 				bool *lock_and_validation_needed)
9157 {
9158 	struct dm_atomic_state *dm_state = NULL;
9159 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9160 	struct dc_stream_state *new_stream;
9161 	int ret = 0;
9162 
9163 	/*
9164 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9165 	 * update changed items
9166 	 */
9167 	struct amdgpu_crtc *acrtc = NULL;
9168 	struct amdgpu_dm_connector *aconnector = NULL;
9169 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9170 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9171 
9172 	new_stream = NULL;
9173 
9174 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9175 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9176 	acrtc = to_amdgpu_crtc(crtc);
9177 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9178 
9179 	/* TODO This hack should go away */
9180 	if (aconnector && enable) {
9181 		/* Make sure fake sink is created in plug-in scenario */
9182 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9183 							    &aconnector->base);
9184 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9185 							    &aconnector->base);
9186 
9187 		if (IS_ERR(drm_new_conn_state)) {
9188 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9189 			goto fail;
9190 		}
9191 
9192 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9193 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9194 
9195 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9196 			goto skip_modeset;
9197 
9198 		new_stream = create_validate_stream_for_sink(aconnector,
9199 							     &new_crtc_state->mode,
9200 							     dm_new_conn_state,
9201 							     dm_old_crtc_state->stream);
9202 
9203 		/*
9204 		 * we can have no stream on ACTION_SET if a display
9205 		 * was disconnected during S3, in this case it is not an
9206 		 * error, the OS will be updated after detection, and
9207 		 * will do the right thing on next atomic commit
9208 		 */
9209 
9210 		if (!new_stream) {
9211 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9212 					__func__, acrtc->base.base.id);
9213 			ret = -ENOMEM;
9214 			goto fail;
9215 		}
9216 
9217 		/*
9218 		 * TODO: Check VSDB bits to decide whether this should
9219 		 * be enabled or not.
9220 		 */
9221 		new_stream->triggered_crtc_reset.enabled =
9222 			dm->force_timing_sync;
9223 
9224 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9225 
9226 		ret = fill_hdr_info_packet(drm_new_conn_state,
9227 					   &new_stream->hdr_static_metadata);
9228 		if (ret)
9229 			goto fail;
9230 
9231 		/*
9232 		 * If we already removed the old stream from the context
9233 		 * (and set the new stream to NULL) then we can't reuse
9234 		 * the old stream even if the stream and scaling are unchanged.
9235 		 * We'll hit the BUG_ON and black screen.
9236 		 *
9237 		 * TODO: Refactor this function to allow this check to work
9238 		 * in all conditions.
9239 		 */
9240 		if (amdgpu_freesync_vid_mode &&
9241 		    dm_new_crtc_state->stream &&
9242 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9243 			goto skip_modeset;
9244 
9245 		if (dm_new_crtc_state->stream &&
9246 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9247 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9248 			new_crtc_state->mode_changed = false;
9249 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9250 					 new_crtc_state->mode_changed);
9251 		}
9252 	}
9253 
9254 	/* mode_changed flag may get updated above, need to check again */
9255 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9256 		goto skip_modeset;
9257 
	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
9262 		acrtc->crtc_id,
9263 		new_crtc_state->enable,
9264 		new_crtc_state->active,
9265 		new_crtc_state->planes_changed,
9266 		new_crtc_state->mode_changed,
9267 		new_crtc_state->active_changed,
9268 		new_crtc_state->connectors_changed);
9269 
9270 	/* Remove stream for any changed/disabled CRTC */
9271 	if (!enable) {
9272 
9273 		if (!dm_old_crtc_state->stream)
9274 			goto skip_modeset;
9275 
9276 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9277 		    is_timing_unchanged_for_freesync(new_crtc_state,
9278 						     old_crtc_state)) {
9279 			new_crtc_state->mode_changed = false;
9280 			DRM_DEBUG_DRIVER(
9281 				"Mode change not required for front porch change, "
9282 				"setting mode_changed to %d",
9283 				new_crtc_state->mode_changed);
9284 
9285 			set_freesync_fixed_config(dm_new_crtc_state);
9286 
9287 			goto skip_modeset;
9288 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9289 			   is_freesync_video_mode(&new_crtc_state->mode,
9290 						  aconnector)) {
9291 			set_freesync_fixed_config(dm_new_crtc_state);
9292 		}
9293 
9294 		ret = dm_atomic_get_state(state, &dm_state);
9295 		if (ret)
9296 			goto fail;
9297 
9298 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9299 				crtc->base.id);
9300 
9301 		/* i.e. reset mode */
9302 		if (dc_remove_stream_from_ctx(
9303 				dm->dc,
9304 				dm_state->context,
9305 				dm_old_crtc_state->stream) != DC_OK) {
9306 			ret = -EINVAL;
9307 			goto fail;
9308 		}
9309 
9310 		dc_stream_release(dm_old_crtc_state->stream);
9311 		dm_new_crtc_state->stream = NULL;
9312 
9313 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9314 
9315 		*lock_and_validation_needed = true;
9316 
9317 	} else {/* Add stream for any updated/enabled CRTC */
9318 		/*
9319 		 * Quick fix to prevent NULL pointer on new_stream when
9320 		 * added MST connectors not found in existing crtc_state in the chained mode
9321 		 * TODO: need to dig out the root cause of that
9322 		 */
9323 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9324 			goto skip_modeset;
9325 
9326 		if (modereset_required(new_crtc_state))
9327 			goto skip_modeset;
9328 
9329 		if (modeset_required(new_crtc_state, new_stream,
9330 				     dm_old_crtc_state->stream)) {
9331 
9332 			WARN_ON(dm_new_crtc_state->stream);
9333 
9334 			ret = dm_atomic_get_state(state, &dm_state);
9335 			if (ret)
9336 				goto fail;
9337 
9338 			dm_new_crtc_state->stream = new_stream;
9339 
9340 			dc_stream_retain(new_stream);
9341 
9342 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
9343 						crtc->base.id);
9344 
9345 			if (dc_add_stream_to_ctx(
9346 					dm->dc,
9347 					dm_state->context,
9348 					dm_new_crtc_state->stream) != DC_OK) {
9349 				ret = -EINVAL;
9350 				goto fail;
9351 			}
9352 
9353 			*lock_and_validation_needed = true;
9354 		}
9355 	}
9356 
9357 skip_modeset:
9358 	/* Release extra reference */
9359 	if (new_stream)
		dc_stream_release(new_stream);
9361 
9362 	/*
9363 	 * We want to do dc stream updates that do not require a
9364 	 * full modeset below.
9365 	 */
9366 	if (!(enable && aconnector && new_crtc_state->active))
9367 		return 0;
9368 	/*
9369 	 * Given above conditions, the dc state cannot be NULL because:
9370 	 * 1. We're in the process of enabling CRTCs (just been added
9371 	 *    to the dc context, or already is on the context)
9372 	 * 2. Has a valid connector attached, and
9373 	 * 3. Is currently active and enabled.
9374 	 * => The dc stream state currently exists.
9375 	 */
9376 	BUG_ON(dm_new_crtc_state->stream == NULL);
9377 
9378 	/* Scaling or underscan settings */
9379 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9380 		update_stream_scaling_settings(
9381 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9382 
9383 	/* ABM settings */
9384 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9385 
9386 	/*
9387 	 * Color management settings. We also update color properties
9388 	 * when a modeset is needed, to ensure it gets reprogrammed.
9389 	 */
9390 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9391 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9392 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9393 		if (ret)
9394 			goto fail;
9395 	}
9396 
9397 	/* Update Freesync settings. */
9398 	get_freesync_config_for_crtc(dm_new_crtc_state,
9399 				     dm_new_conn_state);
9400 
9401 	return ret;
9402 
9403 fail:
9404 	if (new_stream)
9405 		dc_stream_release(new_stream);
9406 	return ret;
9407 }
9408 
9409 static bool should_reset_plane(struct drm_atomic_state *state,
9410 			       struct drm_plane *plane,
9411 			       struct drm_plane_state *old_plane_state,
9412 			       struct drm_plane_state *new_plane_state)
9413 {
9414 	struct drm_plane *other;
9415 	struct drm_plane_state *old_other_state, *new_other_state;
9416 	struct drm_crtc_state *new_crtc_state;
9417 	int i;
9418 
9419 	/*
9420 	 * TODO: Remove this hack once the checks below are sufficient
9421 	 * enough to determine when we need to reset all the planes on
9422 	 * the stream.
9423 	 */
9424 	if (state->allow_modeset)
9425 		return true;
9426 
9427 	/* Exit early if we know that we're adding or removing the plane. */
9428 	if (old_plane_state->crtc != new_plane_state->crtc)
9429 		return true;
9430 
9431 	/* old crtc == new_crtc == NULL, plane not in context. */
9432 	if (!new_plane_state->crtc)
9433 		return false;
9434 
9435 	new_crtc_state =
9436 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9437 
9438 	if (!new_crtc_state)
9439 		return true;
9440 
9441 	/* CRTC Degamma changes currently require us to recreate planes. */
9442 	if (new_crtc_state->color_mgmt_changed)
9443 		return true;
9444 
9445 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9446 		return true;
9447 
9448 	/*
9449 	 * If there are any new primary or overlay planes being added or
9450 	 * removed then the z-order can potentially change. To ensure
9451 	 * correct z-order and pipe acquisition the current DC architecture
9452 	 * requires us to remove and recreate all existing planes.
9453 	 *
9454 	 * TODO: Come up with a more elegant solution for this.
9455 	 */
9456 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
9459 			continue;
9460 
9461 		if (old_other_state->crtc != new_plane_state->crtc &&
9462 		    new_other_state->crtc != new_plane_state->crtc)
9463 			continue;
9464 
9465 		if (old_other_state->crtc != new_other_state->crtc)
9466 			return true;
9467 
9468 		/* Src/dst size and scaling updates. */
9469 		if (old_other_state->src_w != new_other_state->src_w ||
9470 		    old_other_state->src_h != new_other_state->src_h ||
9471 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9472 		    old_other_state->crtc_h != new_other_state->crtc_h)
9473 			return true;
9474 
9475 		/* Rotation / mirroring updates. */
9476 		if (old_other_state->rotation != new_other_state->rotation)
9477 			return true;
9478 
9479 		/* Blending updates. */
9480 		if (old_other_state->pixel_blend_mode !=
9481 		    new_other_state->pixel_blend_mode)
9482 			return true;
9483 
9484 		/* Alpha updates. */
9485 		if (old_other_state->alpha != new_other_state->alpha)
9486 			return true;
9487 
9488 		/* Colorspace changes. */
9489 		if (old_other_state->color_range != new_other_state->color_range ||
9490 		    old_other_state->color_encoding != new_other_state->color_encoding)
9491 			return true;
9492 
9493 		/* Framebuffer checks fall at the end. */
9494 		if (!old_other_state->fb || !new_other_state->fb)
9495 			continue;
9496 
9497 		/* Pixel format changes can require bandwidth updates. */
9498 		if (old_other_state->fb->format != new_other_state->fb->format)
9499 			return true;
9500 
9501 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9502 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9503 
9504 		/* Tiling and DCC changes also require bandwidth updates. */
9505 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9506 		    old_afb->base.modifier != new_afb->base.modifier)
9507 			return true;
9508 	}
9509 
9510 	return false;
9511 }
9512 
9513 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9514 			      struct drm_plane_state *new_plane_state,
9515 			      struct drm_framebuffer *fb)
9516 {
9517 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9518 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9519 	unsigned int pitch;
9520 	bool linear;
9521 
9522 	if (fb->width > new_acrtc->max_cursor_width ||
9523 	    fb->height > new_acrtc->max_cursor_height) {
9524 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9525 				 new_plane_state->fb->width,
9526 				 new_plane_state->fb->height);
9527 		return -EINVAL;
9528 	}
9529 	if (new_plane_state->src_w != fb->width << 16 ||
9530 	    new_plane_state->src_h != fb->height << 16) {
9531 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9532 		return -EINVAL;
9533 	}
9534 
9535 	/* Pitch in pixels */
9536 	pitch = fb->pitches[0] / fb->format->cpp[0];
9537 
9538 	if (fb->width != pitch) {
9539 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9540 				 fb->width, pitch);
9541 		return -EINVAL;
9542 	}
9543 
9544 	switch (pitch) {
9545 	case 64:
9546 	case 128:
9547 	case 256:
9548 		/* FB pitch is supported by cursor plane */
9549 		break;
9550 	default:
9551 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9552 		return -EINVAL;
9553 	}
9554 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
9557 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9558 		if (adev->family < AMDGPU_FAMILY_AI) {
9559 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9560 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9561 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9562 		} else {
9563 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9564 		}
9565 		if (!linear) {
9566 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9567 			return -EINVAL;
9568 		}
9569 	}
9570 
9571 	return 0;
9572 }
9573 
9574 static int dm_update_plane_state(struct dc *dc,
9575 				 struct drm_atomic_state *state,
9576 				 struct drm_plane *plane,
9577 				 struct drm_plane_state *old_plane_state,
9578 				 struct drm_plane_state *new_plane_state,
9579 				 bool enable,
9580 				 bool *lock_and_validation_needed)
9581 {
9582 
9583 	struct dm_atomic_state *dm_state = NULL;
9584 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9585 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9586 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9587 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9588 	struct amdgpu_crtc *new_acrtc;
9589 	bool needs_reset;
	int ret = 0;

9593 	new_plane_crtc = new_plane_state->crtc;
9594 	old_plane_crtc = old_plane_state->crtc;
9595 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9596 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9597 
9598 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9599 		if (!enable || !new_plane_crtc ||
9600 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9601 			return 0;
9602 
9603 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9604 
9605 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9606 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9607 			return -EINVAL;
9608 		}
9609 
9610 		if (new_plane_state->fb) {
9611 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9612 						 new_plane_state->fb);
9613 			if (ret)
9614 				return ret;
9615 		}
9616 
9617 		return 0;
9618 	}
9619 
9620 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9621 					 new_plane_state);
9622 
9623 	/* Remove any changed/removed planes */
9624 	if (!enable) {
9625 		if (!needs_reset)
9626 			return 0;
9627 
9628 		if (!old_plane_crtc)
9629 			return 0;
9630 
9631 		old_crtc_state = drm_atomic_get_old_crtc_state(
9632 				state, old_plane_crtc);
9633 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9634 
9635 		if (!dm_old_crtc_state->stream)
9636 			return 0;
9637 
9638 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9639 				plane->base.id, old_plane_crtc->base.id);
9640 
9641 		ret = dm_atomic_get_state(state, &dm_state);
9642 		if (ret)
9643 			return ret;
9644 
9645 		if (!dc_remove_plane_from_context(
9646 				dc,
9647 				dm_old_crtc_state->stream,
9648 				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
9656 		dm_new_plane_state->dc_state = NULL;
9657 
9658 		*lock_and_validation_needed = true;
9659 
9660 	} else { /* Add new planes */
9661 		struct dc_plane_state *dc_new_plane_state;
9662 
9663 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9664 			return 0;
9665 
9666 		if (!new_plane_crtc)
9667 			return 0;
9668 
9669 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9670 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9671 
9672 		if (!dm_new_crtc_state->stream)
9673 			return 0;
9674 
9675 		if (!needs_reset)
9676 			return 0;
9677 
9678 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9679 		if (ret)
9680 			return ret;
9681 
9682 		WARN_ON(dm_new_plane_state->dc_state);
9683 
9684 		dc_new_plane_state = dc_create_plane_state(dc);
9685 		if (!dc_new_plane_state)
9686 			return -ENOMEM;
9687 
9688 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9689 				plane->base.id, new_plane_crtc->base.id);
9690 
9691 		ret = fill_dc_plane_attributes(
9692 			drm_to_adev(new_plane_crtc->dev),
9693 			dc_new_plane_state,
9694 			new_plane_state,
9695 			new_crtc_state);
9696 		if (ret) {
9697 			dc_plane_state_release(dc_new_plane_state);
9698 			return ret;
9699 		}
9700 
9701 		ret = dm_atomic_get_state(state, &dm_state);
9702 		if (ret) {
9703 			dc_plane_state_release(dc_new_plane_state);
9704 			return ret;
9705 		}
9706 
9707 		/*
9708 		 * Any atomic check errors that occur after this will
9709 		 * not need a release. The plane state will be attached
9710 		 * to the stream, and therefore part of the atomic
9711 		 * state. It'll be released when the atomic state is
9712 		 * cleaned.
9713 		 */
9714 		if (!dc_add_plane_to_context(
9715 				dc,
9716 				dm_new_crtc_state->stream,
9717 				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}
9723 
9724 		dm_new_plane_state->dc_state = dc_new_plane_state;
9725 
		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
9729 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9730 
9731 		*lock_and_validation_needed = true;
9732 	}
9733 
9734 
9735 	return ret;
9736 }
9737 
9738 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9739 				struct drm_crtc *crtc,
9740 				struct drm_crtc_state *new_crtc_state)
9741 {
9742 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9743 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9744 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get
	 * a cursor per pipe, but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * blending properties match the primary plane's.
	 */
9749 
9750 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9751 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb)
		return 0;
9755 
9756 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9757 			 (new_cursor_state->src_w >> 16);
9758 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9759 			 (new_cursor_state->src_h >> 16);
9760 
9761 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9762 			 (new_primary_state->src_w >> 16);
9763 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9764 			 (new_primary_state->src_h >> 16);
9765 
9766 	if (cursor_scale_w != primary_scale_w ||
9767 	    cursor_scale_h != primary_scale_h) {
9768 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9769 		return -EINVAL;
9770 	}
9771 
9772 	return 0;
9773 }
9774 
9775 #if defined(CONFIG_DRM_AMD_DC_DCN)
9776 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9777 {
9778 	struct drm_connector *connector;
9779 	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
9783 		if (conn_state->crtc != crtc)
9784 			continue;
9785 
9786 		aconnector = to_amdgpu_dm_connector(connector);
9787 		if (!aconnector->port || !aconnector->mst_port)
9788 			aconnector = NULL;
9789 		else
9790 			break;
9791 	}
9792 
9793 	if (!aconnector)
9794 		return 0;
9795 
9796 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9797 }
9798 #endif
9799 
9800 /**
9801  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9802  * @dev: The DRM device
9803  * @state: The atomic state to commit
9804  *
9805  * Validate that the given atomic state is programmable by DC into hardware.
9806  * This involves constructing a &struct dc_state reflecting the new hardware
9807  * state we wish to commit, then querying DC to see if it is programmable. It's
9808  * important not to modify the existing DC state. Otherwise, atomic_check
9809  * may unexpectedly commit hardware changes.
9810  *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any
 * outstanding flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, even when
 * that might not seem necessary. This is because DC stream creation requires
 * the DC sink, which is tied to the DRM connector state. Cleaning this up
 * should be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, or a negative error code if validation failed.
9823  */
9824 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9825 				  struct drm_atomic_state *state)
9826 {
9827 	struct amdgpu_device *adev = drm_to_adev(dev);
9828 	struct dm_atomic_state *dm_state = NULL;
9829 	struct dc *dc = adev->dm.dc;
9830 	struct drm_connector *connector;
9831 	struct drm_connector_state *old_con_state, *new_con_state;
9832 	struct drm_crtc *crtc;
9833 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9834 	struct drm_plane *plane;
9835 	struct drm_plane_state *old_plane_state, *new_plane_state;
9836 	enum dc_status status;
9837 	int ret, i;
9838 	bool lock_and_validation_needed = false;
9839 	struct dm_crtc_state *dm_old_crtc_state;
9840 
9841 	trace_amdgpu_dm_atomic_check_begin(state);
9842 
9843 	ret = drm_atomic_helper_check_modeset(dev, state);
9844 	if (ret)
9845 		goto fail;
9846 
9847 	/* Check connector changes */
9848 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9849 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9850 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9851 
9852 		/* Skip connectors that are disabled or part of modeset already. */
9853 		if (!old_con_state->crtc && !new_con_state->crtc)
9854 			continue;
9855 
9856 		if (!new_con_state->crtc)
9857 			continue;
9858 
9859 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9860 		if (IS_ERR(new_crtc_state)) {
9861 			ret = PTR_ERR(new_crtc_state);
9862 			goto fail;
9863 		}
9864 
9865 		if (dm_old_con_state->abm_level !=
9866 		    dm_new_con_state->abm_level)
9867 			new_crtc_state->connectors_changed = true;
9868 	}
9869 
9870 #if defined(CONFIG_DRM_AMD_DC_DCN)
9871 	if (dc_resource_is_dsc_encoding_supported(dc)) {
9872 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9873 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9874 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9875 				if (ret)
9876 					goto fail;
9877 			}
9878 		}
9879 	}
9880 #endif
9881 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9882 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9883 
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
9888 			continue;
9889 
9890 		if (!new_crtc_state->enable)
9891 			continue;
9892 
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;
9896 
9897 		ret = drm_atomic_add_affected_planes(state, crtc);
9898 		if (ret)
9899 			goto fail;
9900 
9901 		if (dm_old_crtc_state->dsc_force_changed)
9902 			new_crtc_state->mode_changed = true;
9903 	}
9904 
9905 	/*
9906 	 * Add all primary and overlay planes on the CRTC to the state
9907 	 * whenever a plane is enabled to maintain correct z-ordering
9908 	 * and to enable fast surface updates.
9909 	 */
9910 	drm_for_each_crtc(crtc, dev) {
9911 		bool modified = false;
9912 
9913 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9914 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9915 				continue;
9916 
9917 			if (new_plane_state->crtc == crtc ||
9918 			    old_plane_state->crtc == crtc) {
9919 				modified = true;
9920 				break;
9921 			}
9922 		}
9923 
9924 		if (!modified)
9925 			continue;
9926 
9927 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9928 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9929 				continue;
9930 
9931 			new_plane_state =
9932 				drm_atomic_get_plane_state(state, plane);
9933 
9934 			if (IS_ERR(new_plane_state)) {
9935 				ret = PTR_ERR(new_plane_state);
9936 				goto fail;
9937 			}
9938 		}
9939 	}
9940 
	/* Remove existing planes if they are modified */
9942 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9943 		ret = dm_update_plane_state(dc, state, plane,
9944 					    old_plane_state,
9945 					    new_plane_state,
9946 					    false,
9947 					    &lock_and_validation_needed);
9948 		if (ret)
9949 			goto fail;
9950 	}
9951 
9952 	/* Disable all crtcs which require disable */
9953 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9954 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9955 					   old_crtc_state,
9956 					   new_crtc_state,
9957 					   false,
9958 					   &lock_and_validation_needed);
9959 		if (ret)
9960 			goto fail;
9961 	}
9962 
9963 	/* Enable all crtcs which require enable */
9964 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9965 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9966 					   old_crtc_state,
9967 					   new_crtc_state,
9968 					   true,
9969 					   &lock_and_validation_needed);
9970 		if (ret)
9971 			goto fail;
9972 	}
9973 
9974 	/* Add new/modified planes */
9975 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9976 		ret = dm_update_plane_state(dc, state, plane,
9977 					    old_plane_state,
9978 					    new_plane_state,
9979 					    true,
9980 					    &lock_and_validation_needed);
9981 		if (ret)
9982 			goto fail;
9983 	}
9984 
9985 	/* Run this here since we want to validate the streams we created */
9986 	ret = drm_atomic_helper_check_planes(dev, state);
9987 	if (ret)
9988 		goto fail;
9989 
9990 	/* Check cursor planes scaling */
9991 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9992 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9993 		if (ret)
9994 			goto fail;
9995 	}
9996 
9997 	if (state->legacy_cursor_update) {
9998 		/*
9999 		 * This is a fast cursor update coming from the plane update
10000 		 * helper, check if it can be done asynchronously for better
10001 		 * performance.
10002 		 */
10003 		state->async_update =
10004 			!drm_atomic_helper_async_check(dev, state);
10005 
10006 		/*
10007 		 * Skip the remaining global validation if this is an async
10008 		 * update. Cursor updates can be done without affecting
10009 		 * state or bandwidth calcs and this avoids the performance
10010 		 * penalty of locking the private state object and
10011 		 * allocating a new dc_state.
10012 		 */
10013 		if (state->async_update)
10014 			return 0;
10015 	}
10016 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling changes validation was removed due to the inability
	 * to commit a new stream into the context w/o causing a full reset.
	 * Need to decide how to handle this.
	 */
10022 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10023 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10024 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10025 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10026 
10027 		/* Skip any modesets/resets */
10028 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10029 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10030 			continue;
10031 
		/* Skip anything that is not a scaling or underscan change */
10033 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10034 			continue;
10035 
10036 		lock_and_validation_needed = true;
10037 	}
10038 
10039 	/**
10040 	 * Streams and planes are reset when there are changes that affect
10041 	 * bandwidth. Anything that affects bandwidth needs to go through
10042 	 * DC global validation to ensure that the configuration can be applied
10043 	 * to hardware.
10044 	 *
10045 	 * We currently have to stall out here in atomic_check for outstanding
10046 	 * commits to finish in this case because our IRQ handlers reference
10047 	 * DRM state directly - we can end up disabling interrupts too early
10048 	 * if we don't.
10049 	 *
10050 	 * TODO: Remove this stall and drop DM state private objects.
10051 	 */
10052 	if (lock_and_validation_needed) {
10053 		ret = dm_atomic_get_state(state, &dm_state);
10054 		if (ret)
10055 			goto fail;
10056 
10057 		ret = do_aquire_global_lock(dev, state);
10058 		if (ret)
10059 			goto fail;
10060 
10061 #if defined(CONFIG_DRM_AMD_DC_DCN)
10062 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
10063 			goto fail;
		}
10064 
10065 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10066 		if (ret)
10067 			goto fail;
10068 #endif
10069 
10070 		/*
10071 		 * Perform validation of MST topology in the state:
10072 		 * We need to perform MST atomic check before calling
10073 		 * dc_validate_global_state(); otherwise we risk getting stuck
10074 		 * in an infinite loop and eventually hanging.
10075 		 */
10076 		ret = drm_dp_mst_atomic_check(state);
10077 		if (ret)
10078 			goto fail;
10079 		status = dc_validate_global_state(dc, dm_state->context, false);
10080 		if (status != DC_OK) {
10081 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10082 				       dc_status_to_str(status), status);
10083 			ret = -EINVAL;
10084 			goto fail;
10085 		}
10086 	} else {
10087 		/*
10088 		 * The commit is a fast update. Fast updates shouldn't change
10089 		 * the DC context or affect global validation, and can have their
10090 		 * commit work done in parallel with other commits not touching
10091 		 * the same resource. If we have a new DC context as part of
10092 		 * the DM atomic state from validation we need to free it and
10093 		 * retain the existing one instead.
10094 		 *
10095 		 * Furthermore, since the DM atomic state only contains the DC
10096 		 * context and can safely be annulled, we can free the state
10097 		 * and clear the associated private object now to free
10098 		 * some memory and avoid a possible use-after-free later.
10099 		 */
10100 
10101 		for (i = 0; i < state->num_private_objs; i++) {
10102 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10103 
10104 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
10105 				int j = state->num_private_objs - 1;
10106 
10107 				dm_atomic_destroy_state(obj,
10108 						state->private_objs[i].state);
10109 
10110 				/* If i is not at the end of the array then the
10111 				 * last element needs to be moved to where i was
10112 				 * before the array can safely be truncated.
10113 				 */
10114 				if (i != j)
10115 					state->private_objs[i] =
10116 						state->private_objs[j];
10117 
10118 				state->private_objs[j].ptr = NULL;
10119 				state->private_objs[j].state = NULL;
10120 				state->private_objs[j].old_state = NULL;
10121 				state->private_objs[j].new_state = NULL;
10122 
10123 				state->num_private_objs = j;
10124 				break;
10125 			}
10126 		}
10127 	}
10128 
10129 	/* Store the overall update type for use later in atomic check. */
10130 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10131 		struct dm_crtc_state *dm_new_crtc_state =
10132 			to_dm_crtc_state(new_crtc_state);
10133 
10134 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10135 							 UPDATE_TYPE_FULL :
10136 							 UPDATE_TYPE_FAST;
10137 	}
10138 
10139 	/* Must be success */
10140 	WARN_ON(ret);
10141 
10142 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10143 
10144 	return ret;
10145 
10146 fail:
10147 	if (ret == -EDEADLK)
10148 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10149 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10150 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10151 	else
10152 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10153 
10154 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10155 
10156 	return ret;
10157 }
10158 
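/*
 * Check whether the sink can regenerate its video timing without the DP
 * Main Stream Attributes (MSA). Sinks report this through the
 * MSA_TIMING_PAR_IGNORED bit of the DOWN_STREAM_PORT_COUNT DPCD register,
 * and it is a prerequisite for variable-refresh-rate (FreeSync) support
 * over DP.
 */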
10159 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10160 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10161 {
10162 	uint8_t dpcd_data;
10163 	bool capable = false;
10164 
10165 	if (amdgpu_dm_connector->dc_link &&
10166 		dm_helpers_dp_read_dpcd(
10167 				NULL,
10168 				amdgpu_dm_connector->dc_link,
10169 				DP_DOWN_STREAM_PORT_COUNT,
10170 				&dpcd_data,
10171 				sizeof(dpcd_data))) {
10172 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;
10173 	}
10174 
10175 	return capable;
10176 }
10177 
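/*
 * Stream a CEA extension block to the DC EDID parser, which is offloaded
 * to DMCU/DMUB firmware, in 8-byte chunks. Each chunk must be acked before
 * the next is sent; after the final chunk the firmware reports whether an
 * AMD vendor-specific data block (VSDB) with FreeSync information was
 * found.
 */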
10178 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10179 		uint8_t *edid_ext, int len,
10180 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10181 {
10182 	int i;
10183 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10184 	struct dc *dc = adev->dm.dc;
10185 
10186 	/* send extension block to DMCU for parsing */
10187 	for (i = 0; i < len; i += 8) {
10188 		bool res;
10189 		int offset;
10190 
10191 		/* send 8 bytes at a time */
10192 		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10193 			return false;
10194 
10195 		if (i + 8 == len) {
10196 			/* entire EDID block sent; expect the parse result */
10197 			int version, min_rate, max_rate;
10198 
10199 			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10200 			if (res) {
10201 				/* amd vsdb found */
10202 				vsdb_info->freesync_supported = 1;
10203 				vsdb_info->amd_vsdb_version = version;
10204 				vsdb_info->min_refresh_rate_hz = min_rate;
10205 				vsdb_info->max_refresh_rate_hz = max_rate;
10206 				return true;
10207 			}
10208 			/* not amd vsdb */
10209 			return false;
10210 		}
10211 
10212 		/* check for ack */
10213 		res = dc_edid_parser_recv_cea_ack(dc, &offset);
10214 		if (!res)
10215 			return false;
10216 	}
10217 
10218 	return false;
10219 }
10220 
10221 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10222 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10223 {
10224 	uint8_t *edid_ext = NULL;
10225 	int i;
10226 	bool valid_vsdb_found = false;
10227 
10228 	/*----- drm_find_cea_extension() -----*/
10229 	/* No EDID or EDID extensions */
10230 	if (edid == NULL || edid->extensions == 0)
10231 		return -ENODEV;
10232 
10233 	/* Find CEA extension */
10234 	for (i = 0; i < edid->extensions; i++) {
10235 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10236 		if (edid_ext[0] == CEA_EXT)
10237 			break;
10238 	}
10239 
10240 	if (i == edid->extensions)
10241 		return -ENODEV;
10242 
10243 	/*----- cea_db_offsets() -----*/
10244 	if (edid_ext[0] != CEA_EXT)
10245 		return -ENODEV;
10246 
10247 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10248 
10249 	return valid_vsdb_found ? i : -ENODEV;
10250 }
10251 
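/**
 * amdgpu_dm_update_freesync_caps - update the connector's FreeSync state
 * @connector: connector to inspect
 * @edid: EDID read from the sink, or NULL to clear the FreeSync caps
 *
 * Parse the EDID for FreeSync support: DP/eDP sinks use the monitor range
 * descriptor in the detailed timings, while HDMI sinks are checked for an
 * AMD vendor-specific data block via the DC EDID parser. The result is
 * mirrored into the connector's vrr_capable property.
 */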
10252 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10253 					struct edid *edid)
10254 {
10255 	int i = 0;
10256 	struct detailed_timing *timing;
10257 	struct detailed_non_pixel *data;
10258 	struct detailed_data_monitor_range *range;
10259 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10260 			to_amdgpu_dm_connector(connector);
10261 	struct dm_connector_state *dm_con_state = NULL;
10262 
10263 	struct drm_device *dev = connector->dev;
10264 	struct amdgpu_device *adev = drm_to_adev(dev);
10265 	bool freesync_capable = false;
10266 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10267 
10268 	if (!connector->state) {
10269 		DRM_ERROR("%s - Connector has no state", __func__);
10270 		goto update;
10271 	}
10272 
10273 	if (!edid) {
10274 		dm_con_state = to_dm_connector_state(connector->state);
10275 
10276 		amdgpu_dm_connector->min_vfreq = 0;
10277 		amdgpu_dm_connector->max_vfreq = 0;
10278 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10279 
10280 		goto update;
10281 	}
10282 
10283 	dm_con_state = to_dm_connector_state(connector->state);
10284 
10285 	if (!amdgpu_dm_connector->dc_sink) {
10286 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10287 		goto update;
10288 	}
10289 	if (!adev->dm.freesync_module)
10290 		goto update;
10291 
10293 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
10294 	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10295 		bool edid_check_required = false;
10296 
10297 		if (edid) {
10298 			edid_check_required = is_dp_capable_without_timing_msa(
10299 						adev->dm.dc,
10300 						amdgpu_dm_connector);
10301 		}
10302 
10303 		if (edid_check_required && (edid->version > 1 ||
10304 		    (edid->version == 1 && edid->revision > 1))) {
10305 			for (i = 0; i < 4; i++) {
10306 
10307 				timing	= &edid->detailed_timings[i];
10308 				data	= &timing->data.other_data;
10309 				range	= &data->data.range;
10310 				/*
10311 				 * Check if monitor has continuous frequency mode
10312 				 */
10313 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10314 					continue;
10315 				/*
10316 				 * Check for the range-limits-only flag. If flags == 1,
10317 				 * no additional timing information is provided.
10318 				 * Default GTF, GTF secondary curve and CVT are not
10319 				 * supported.
10320 				 */
10321 				if (range->flags != 1)
10322 					continue;
10323 
10324 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10325 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10326 				amdgpu_dm_connector->pixel_clock_mhz =
10327 					range->pixel_clock_mhz * 10;
10328 
10329 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10330 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10331 
10332 				break;
10333 			}
10334 
10335 			if (amdgpu_dm_connector->max_vfreq -
10336 			    amdgpu_dm_connector->min_vfreq > 10) {
10338 				freesync_capable = true;
10339 			}
10340 		}
10341 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10342 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10343 		if (i >= 0 && vsdb_info.freesync_supported) {
10344 			timing  = &edid->detailed_timings[i];
10345 			data    = &timing->data.other_data;
10346 
10347 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10348 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10349 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10350 				freesync_capable = true;
10351 
10352 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10353 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10354 		}
10355 	}
10356 
10357 update:
10358 	if (dm_con_state)
10359 		dm_con_state->freesync_capable = freesync_capable;
10360 
10361 	if (connector->vrr_capable_property)
10362 		drm_connector_set_vrr_capable_property(connector,
10363 						       freesync_capable);
10364 }
10365 
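/*
 * Cache the sink's PSR capability, read from the DP_PSR_SUPPORT DPCD
 * register, on the link. Only connected eDP links are considered, and any
 * nonzero reported PSR version is treated as PSR version 1 here.
 */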
10366 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10367 {
10368 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10369 
10370 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10371 		return;
10372 	if (link->type == dc_connection_none)
10373 		return;
10374 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10375 					dpcd_data, sizeof(dpcd_data))) {
10376 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10377 
10378 		if (dpcd_data[0] == 0) {
10379 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10380 			link->psr_settings.psr_feature_enabled = false;
10381 		} else {
10382 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
10383 			link->psr_settings.psr_feature_enabled = true;
10384 		}
10385 
10386 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10387 	}
10388 }
10389 
10390 /**
10391  * amdgpu_dm_link_setup_psr() - configure the PSR link
10392  * @stream: stream state
10393  *
10394  * Return: true on success
10395  */
10396 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10397 {
10398 	struct dc_link *link = NULL;
10399 	struct psr_config psr_config = {0};
10400 	struct psr_context psr_context = {0};
10401 	bool ret = false;
10402 
10403 	if (stream == NULL)
10404 		return false;
10405 
10406 	link = stream->link;
10407 
10408 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10409 
10410 	if (psr_config.psr_version > 0) {
10411 		psr_config.psr_exit_link_training_required = 0x1;
10412 		psr_config.psr_frame_capture_indication_req = 0;
10413 		psr_config.psr_rfb_setup_time = 0x37;
10414 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10415 		psr_config.allow_smu_optimizations = 0x0;
10416 
10417 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10419 	}
10420 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10421 
10422 	return ret;
10423 }
10424 
10425 /**
10426  * amdgpu_dm_psr_enable() - enable PSR in firmware
10427  * @stream: stream state
10428  *
10429  * Return: true on success
10430  */
10431 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10432 {
10433 	struct dc_link *link = stream->link;
10434 	unsigned int vsync_rate_hz = 0;
10435 	struct dc_static_screen_params params = {0};
10436 	/* Calculate the number of static frames before generating an
10437 	 * interrupt to enter PSR.
10438 	 * Initialize with a fail-safe of 2 static frames.
10439 	 */
10440 	unsigned int num_frames_static = 2;
10441 
10442 	DRM_DEBUG_DRIVER("Enabling psr...\n");
10443 
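	/* Nominal refresh rate in Hz: pixel clock divided by the total frame
	 * size (pix_clk_100hz is in units of 100 Hz, hence the factor of 100).
	 */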
10444 	vsync_rate_hz = div64_u64(div64_u64((
10445 			stream->timing.pix_clk_100hz * 100),
10446 			stream->timing.v_total),
10447 			stream->timing.h_total);
10448 
10449 	/* Round up:
10450 	 * calculate the number of frames such that at least 30 ms of static
10451 	 * screen time has passed.
10452 	 */
10453 	if (vsync_rate_hz != 0) {
10454 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10455 		num_frames_static = (30000 / frame_time_microsec) + 1;
10456 	}
10457 
10458 	params.triggers.cursor_update = true;
10459 	params.triggers.overlay_update = true;
10460 	params.triggers.surface_update = true;
10461 	params.num_frames = num_frames_static;
10462 
10463 	dc_stream_set_static_screen_params(link->ctx->dc,
10464 					   &stream, 1,
10465 					   &params);
10466 
10467 	return dc_link_set_psr_allow_active(link, true, false, false);
10468 }
10469 
10470 /**
10471  * amdgpu_dm_psr_disable() - disable PSR in firmware
10472  * @stream: stream state
10473  *
10474  * Return: true on success
10475  */
10476 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10477 {
10479 	DRM_DEBUG_DRIVER("Disabling psr...\n");
10480 
10481 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
10482 }
10483 
10484 /**
10485  * amdgpu_dm_psr_disable_all() - disable PSR in firmware
10486  * if PSR is enabled on any stream
10487  *
10488  * Return: true on success
10489  */
10490 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10491 {
10492 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10493 	return dc_set_psr_allow_active(dm->dc, false);
10494 }
10495 
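/*
 * Propagate the current force_timing_sync setting to every stream in the
 * active DC state and retrigger CRTC synchronization, all under the DC
 * lock.
 */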
10496 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10497 {
10498 	struct amdgpu_device *adev = drm_to_adev(dev);
10499 	struct dc *dc = adev->dm.dc;
10500 	int i;
10501 
10502 	mutex_lock(&adev->dm.dc_lock);
10503 	if (dc->current_state) {
10504 		for (i = 0; i < dc->current_state->stream_count; ++i)
10505 			dc->current_state->streams[i]
10506 				->triggered_crtc_reset.enabled =
10507 				adev->dm.force_timing_sync;
10508 
10509 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10510 		dc_trigger_sync(dc, dc->current_state);
10511 	}
10512 	mutex_unlock(&adev->dm.dc_lock);
10513 }
10514 
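/*
 * Register access helpers used by DC. Both helpers emit a tracepoint for
 * every access; when DM_CHECK_ADDR_0 is defined they additionally reject
 * accesses to register offset 0, which typically indicate an uninitialized
 * register definition.
 */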
10515 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10516 		       uint32_t value, const char *func_name)
10517 {
10518 #ifdef DM_CHECK_ADDR_0
10519 	if (address == 0) {
10520 		DC_ERR("invalid register write. address = 0");
10521 		return;
10522 	}
10523 #endif
10524 	cgs_write_register(ctx->cgs_device, address, value);
10525 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10526 }
10527 
10528 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10529 			  const char *func_name)
10530 {
10531 	uint32_t value;
10532 #ifdef DM_CHECK_ADDR_0
10533 	if (address == 0) {
10534 		DC_ERR("invalid register read; address = 0\n");
10535 		return 0;
10536 	}
10537 #endif
10538 
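	/* Reads cannot be serviced while a DMUB register-offload gather is in
	 * progress unless it is part of a burst-write sequence; treat this as
	 * a programming error.
	 */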
10539 	if (ctx->dmub_srv &&
10540 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10541 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10542 		ASSERT(false);
10543 		return 0;
10544 	}
10545 
10546 	value = cgs_read_register(ctx->cgs_device, address);
10547 
10548 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10549 
10550 	return value;
10551 }
10552