xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision faf26f2b12e1e03956f7e628183e422d94713e4b)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/display/drm_dp_mst_helper.h>
77 #include <drm/display/drm_hdmi_helper.h>
78 #include <drm/drm_atomic.h>
79 #include <drm/drm_atomic_uapi.h>
80 #include <drm/drm_atomic_helper.h>
81 #include <drm/drm_fb_helper.h>
82 #include <drm/drm_fourcc.h>
83 #include <drm/drm_edid.h>
84 #include <drm/drm_vblank.h>
85 #include <drm/drm_audio_component.h>
86 
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93 
94 #include "soc15_common.h"
95 
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
99 
/* DMCUB firmware images, one per DCN-based ASIC that carries a DMUB. */
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

/* DMCU firmware for the ASICs that use a DMCU instead of a DMUB. */
#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
132 
133 /**
134  * DOC: overview
135  *
136  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
137  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
138  * requests into DC requests, and DC responses into DRM responses.
139  *
140  * The root control structure is &struct amdgpu_display_manager.
141  */
142 
143 /* basic init/fini API */
144 static int amdgpu_dm_init(struct amdgpu_device *adev);
145 static void amdgpu_dm_fini(struct amdgpu_device *adev);
146 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
147 
148 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
149 {
150 	switch (link->dpcd_caps.dongle_type) {
151 	case DISPLAY_DONGLE_NONE:
152 		return DRM_MODE_SUBCONNECTOR_Native;
153 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
154 		return DRM_MODE_SUBCONNECTOR_VGA;
155 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
156 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
157 		return DRM_MODE_SUBCONNECTOR_DVID;
158 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
159 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
160 		return DRM_MODE_SUBCONNECTOR_HDMIA;
161 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
162 	default:
163 		return DRM_MODE_SUBCONNECTOR_Unknown;
164 	}
165 }
166 
167 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
168 {
169 	struct dc_link *link = aconnector->dc_link;
170 	struct drm_connector *connector = &aconnector->base;
171 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
172 
173 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
174 		return;
175 
176 	if (aconnector->dc_sink)
177 		subconnector = get_subconnector_type(link);
178 
179 	drm_object_property_set_value(&connector->base,
180 			connector->dev->mode_config.dp_subconnector_property,
181 			subconnector);
182 }
183 
184 /*
185  * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
187  * drm_encoder, drm_mode_config
188  *
189  * Returns 0 on success
190  */
191 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
192 /* removes and deallocates the drm structures, created by the above function */
193 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
194 
195 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
196 				struct drm_plane *plane,
197 				unsigned long possible_crtcs,
198 				const struct dc_plane_cap *plane_cap);
199 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
200 			       struct drm_plane *plane,
201 			       uint32_t link_index);
202 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
203 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
204 				    uint32_t link_index,
205 				    struct amdgpu_encoder *amdgpu_encoder);
206 static int amdgpu_dm_encoder_init(struct drm_device *dev,
207 				  struct amdgpu_encoder *aencoder,
208 				  uint32_t link_index);
209 
210 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
211 
212 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
213 
214 static int amdgpu_dm_atomic_check(struct drm_device *dev,
215 				  struct drm_atomic_state *state);
216 
217 static void handle_cursor_update(struct drm_plane *plane,
218 				 struct drm_plane_state *old_plane_state);
219 
220 static const struct drm_format_info *
221 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
222 
223 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
224 static void handle_hpd_rx_irq(void *param);
225 
226 static bool
227 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
228 				 struct drm_crtc_state *new_crtc_state);
229 /*
230  * dm_vblank_get_counter
231  *
232  * @brief
233  * Get counter for number of vertical blanks
234  *
235  * @param
236  * struct amdgpu_device *adev - [in] desired amdgpu device
237  * int disp_idx - [in] which CRTC to get the counter from
238  *
239  * @return
240  * Counter for vertical blanks
241  */
242 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
243 {
244 	if (crtc >= adev->mode_info.num_crtc)
245 		return 0;
246 	else {
247 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
248 
249 		if (acrtc->dm_irq_params.stream == NULL) {
250 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
251 				  crtc);
252 			return 0;
253 		}
254 
255 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
256 	}
257 }
258 
259 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
260 				  u32 *vbl, u32 *position)
261 {
262 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
263 
264 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
265 		return -EINVAL;
266 	else {
267 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
268 
269 		if (acrtc->dm_irq_params.stream ==  NULL) {
270 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
271 				  crtc);
272 			return 0;
273 		}
274 
275 		/*
276 		 * TODO rework base driver to use values directly.
277 		 * for now parse it back into reg-format
278 		 */
279 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
280 					 &v_blank_start,
281 					 &v_blank_end,
282 					 &h_position,
283 					 &v_position);
284 
285 		*position = v_position | (h_position << 16);
286 		*vbl = v_blank_start | (v_blank_end << 16);
287 	}
288 
289 	return 0;
290 }
291 
/* amd_ip_funcs .is_idle hook: DM currently always reports idle. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
297 
/* amd_ip_funcs .wait_for_idle hook: no-op, always succeeds. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
303 
/* amd_ip_funcs .check_soft_reset hook: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
308 
/* amd_ip_funcs .soft_reset hook: no-op, always succeeds. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
314 
315 static struct amdgpu_crtc *
316 get_crtc_by_otg_inst(struct amdgpu_device *adev,
317 		     int otg_inst)
318 {
319 	struct drm_device *dev = adev_to_drm(adev);
320 	struct drm_crtc *crtc;
321 	struct amdgpu_crtc *amdgpu_crtc;
322 
323 	if (WARN_ON(otg_inst == -1))
324 		return adev->mode_info.crtcs[0];
325 
326 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
327 		amdgpu_crtc = to_amdgpu_crtc(crtc);
328 
329 		if (amdgpu_crtc->otg_inst == otg_inst)
330 			return amdgpu_crtc;
331 	}
332 
333 	return NULL;
334 }
335 
336 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
337 {
338 	return acrtc->dm_irq_params.freesync_config.state ==
339 		       VRR_STATE_ACTIVE_VARIABLE ||
340 	       acrtc->dm_irq_params.freesync_config.state ==
341 		       VRR_STATE_ACTIVE_FIXED;
342 }
343 
344 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
345 {
346 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
347 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
348 }
349 
350 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
351 					      struct dm_crtc_state *new_state)
352 {
353 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
354 		return true;
355 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
356 		return true;
357 	else
358 		return false;
359 }
360 
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters; identifies the pageflip IRQ
 *                    source used to look up the CRTC
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	/* Only a flip we submitted may be completed here. */
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. Take ownership of the pending event. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}
464 
/**
 * dm_vupdate_high_irq() - Handles the VUPDATE interrupt
 * @interrupt_params: common IRQ parameters; identifies the VUPDATE IRQ
 *                    source used to look up the CRTC
 *
 * Traces the observed frame duration, and in VRR mode performs the core
 * vblank handling (deferred here, after the end of the front-porch) plus
 * BTR processing for pre-DCE12 ASICs.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		/* Record the measured refresh rate for tracing/debug. */
		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
523 
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
604 
605 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
606 /**
607  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
608  * DCN generation ASICs
609  * @interrupt_params: interrupt parameters
610  *
611  * Used to set crc window/read out crc value at vertical line 0 position
612  */
613 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
614 {
615 	struct common_irq_params *irq_params = interrupt_params;
616 	struct amdgpu_device *adev = irq_params->adev;
617 	struct amdgpu_crtc *acrtc;
618 
619 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
620 
621 	if (!acrtc)
622 		return;
623 
624 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
625 }
626 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
627 
628 /**
629  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
630  * @adev: amdgpu_device pointer
631  * @notify: dmub notification structure
632  *
633  * Dmub AUX or SET_CONFIG command completion processing callback
634  * Copies dmub notification to DM which is to be read by AUX command.
635  * issuing thread and also signals the event to wake up the thread.
636  */
637 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
638 					struct dmub_notification *notify)
639 {
640 	if (adev->dm.dmub_notify)
641 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
642 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
643 		complete(&adev->dm.dmub_aux_transfer_done);
644 }
645 
646 /**
647  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
648  * @adev: amdgpu_device pointer
649  * @notify: dmub notification structure
650  *
651  * Dmub Hpd interrupt processing callback. Gets displayindex through the
652  * ink index and calls helper to do the processing.
653  */
654 static void dmub_hpd_callback(struct amdgpu_device *adev,
655 			      struct dmub_notification *notify)
656 {
657 	struct amdgpu_dm_connector *aconnector;
658 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
659 	struct drm_connector *connector;
660 	struct drm_connector_list_iter iter;
661 	struct dc_link *link;
662 	uint8_t link_index = 0;
663 	struct drm_device *dev;
664 
665 	if (adev == NULL)
666 		return;
667 
668 	if (notify == NULL) {
669 		DRM_ERROR("DMUB HPD callback notification was NULL");
670 		return;
671 	}
672 
673 	if (notify->link_index > adev->dm.dc->link_count) {
674 		DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
675 		return;
676 	}
677 
678 	link_index = notify->link_index;
679 	link = adev->dm.dc->links[link_index];
680 	dev = adev->dm.ddev;
681 
682 	drm_connector_list_iter_begin(dev, &iter);
683 	drm_for_each_connector_iter(connector, &iter) {
684 		aconnector = to_amdgpu_dm_connector(connector);
685 		if (link && aconnector->dc_link == link) {
686 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
687 			hpd_aconnector = aconnector;
688 			break;
689 		}
690 	}
691 	drm_connector_list_iter_end(&iter);
692 
693 	if (hpd_aconnector) {
694 		if (notify->type == DMUB_NOTIFICATION_HPD)
695 			handle_hpd_irq_helper(hpd_aconnector);
696 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
697 			handle_hpd_rx_irq(hpd_aconnector);
698 	}
699 }
700 
701 /**
702  * register_dmub_notify_callback - Sets callback for DMUB notify
703  * @adev: amdgpu_device pointer
704  * @type: Type of dmub notification
705  * @callback: Dmub interrupt callback function
706  * @dmub_int_thread_offload: offload indicator
707  *
708  * API to register a dmub callback handler for a dmub notification
709  * Also sets indicator whether callback processing to be offloaded.
710  * to dmub interrupt handling thread
711  * Return: true if successfully registered, false if there is existing registration
712  */
713 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
714 					  enum dmub_notification_type type,
715 					  dmub_notify_interrupt_callback_t callback,
716 					  bool dmub_int_thread_offload)
717 {
718 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
719 		adev->dm.dmub_callback[type] = callback;
720 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
721 	} else
722 		return false;
723 
724 	return true;
725 }
726 
727 static void dm_handle_hpd_work(struct work_struct *work)
728 {
729 	struct dmub_hpd_work *dmub_hpd_wrk;
730 
731 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
732 
733 	if (!dmub_hpd_wrk->dmub_notify) {
734 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
735 		return;
736 	}
737 
738 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
739 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
740 		dmub_hpd_wrk->dmub_notify);
741 	}
742 
743 	kfree(dmub_hpd_wrk->dmub_notify);
744 	kfree(dmub_hpd_wrk);
745 
746 }
747 
#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox Interrupt
 * event handler.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	/* Drain pending DMUB notifications and dispatch each to its handler. */
	if (dc_enable_dmub_notifications(adev->dm.dc) &&
		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				/* Offload: copy the notification and queue it to
				 * the delayed HPD workqueue (GFP_ATOMIC: we are
				 * in interrupt context).
				 */
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				/* NOTE(review): dmub_notify was already checked
				 * non-NULL above; this re-check is redundant.
				 */
				if (dmub_hpd_wrk->dmub_notify)
					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}


	/* Drain the DMUB trace buffer, bounded by DMUB_TRACE_MAX_READ reads. */
	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}
828 
/* amd_ip_funcs .set_clockgating_state hook: no-op, always succeeds. */
static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}
834 
/* amd_ip_funcs .set_powergating_state hook: no-op, always succeeds. */
static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}
840 
841 /* Prototypes of private functions */
842 static int dm_early_init(void* handle);
843 
844 /* Allocate memory for FBC compressed data  */
845 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
846 {
847 	struct drm_device *dev = connector->dev;
848 	struct amdgpu_device *adev = drm_to_adev(dev);
849 	struct dm_compressor_info *compressor = &adev->dm.compressor;
850 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
851 	struct drm_display_mode *mode;
852 	unsigned long max_size = 0;
853 
854 	if (adev->dm.dc->fbc_compressor == NULL)
855 		return;
856 
857 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
858 		return;
859 
860 	if (compressor->bo_ptr)
861 		return;
862 
863 
864 	list_for_each_entry(mode, &connector->modes, head) {
865 		if (max_size < mode->htotal * mode->vtotal)
866 			max_size = mode->htotal * mode->vtotal;
867 	}
868 
869 	if (max_size) {
870 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
871 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
872 			    &compressor->gpu_addr, &compressor->cpu_addr);
873 
874 		if (r)
875 			DRM_ERROR("DM: Failed to initialize FBC\n");
876 		else {
877 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
878 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
879 		}
880 
881 	}
882 
883 }
884 
/*
 * DRM audio component .get_eld callback: copy the ELD (EDID-Like Data) of
 * the connector currently mapped to audio endpoint @port into @buf.
 *
 * Returns the full ELD size in bytes (which may exceed @max_bytes; only
 * min(max_bytes, eld_size) bytes are copied), or 0 when no connector is
 * driving the requested endpoint. Sets *@enabled accordingly.
 */
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					  int pipe, bool *enabled,
					  unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	/* Serialize against concurrent audio hotplug / ELD updates. */
	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		/* Copy at most max_bytes; ret still reports the full size. */
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
920 
/* Ops exposed to the HDA driver through the DRM audio component framework. */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
924 
925 static int amdgpu_dm_audio_component_bind(struct device *kdev,
926 				       struct device *hda_kdev, void *data)
927 {
928 	struct drm_device *dev = dev_get_drvdata(kdev);
929 	struct amdgpu_device *adev = drm_to_adev(dev);
930 	struct drm_audio_component *acomp = data;
931 
932 	acomp->ops = &amdgpu_dm_audio_component_ops;
933 	acomp->dev = kdev;
934 	adev->dm.audio_component = acomp;
935 
936 	return 0;
937 }
938 
939 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
940 					  struct device *hda_kdev, void *data)
941 {
942 	struct drm_device *dev = dev_get_drvdata(kdev);
943 	struct amdgpu_device *adev = drm_to_adev(dev);
944 	struct drm_audio_component *acomp = data;
945 
946 	acomp->ops = NULL;
947 	acomp->dev = NULL;
948 	adev->dm.audio_component = NULL;
949 }
950 
/* Bind/unbind callbacks registered with the device component framework. */
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
955 
/*
 * Initialize DM's audio endpoint bookkeeping and register the DRM audio
 * component so the HDA driver can query ELDs.
 *
 * Returns 0 on success (including when audio is globally disabled via the
 * amdgpu_audio module parameter), or the negative errno from component_add().
 */
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	/* Mark every pin idle/disconnected; real state is filled in later. */
	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		/* Pin id mirrors the DC audio resource instance. */
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}
987 
988 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
989 {
990 	if (!amdgpu_audio)
991 		return;
992 
993 	if (!adev->mode_info.audio.enabled)
994 		return;
995 
996 	if (adev->dm.audio_registered) {
997 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
998 		adev->dm.audio_registered = false;
999 	}
1000 
1001 	/* TODO: Disable audio? */
1002 
1003 	adev->mode_info.audio.enabled = false;
1004 }
1005 
1006 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1007 {
1008 	struct drm_audio_component *acomp = adev->dm.audio_component;
1009 
1010 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1011 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1012 
1013 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1014 						 pin, -1);
1015 	}
1016 }
1017 
/*
 * Bring up the DMCUB (display microcontroller) hardware: reset it, copy the
 * firmware and VBIOS images into the framebuffer windows reserved at sw_init
 * time, program the DMUB service and wait for the firmware to boot.
 *
 * Returns 0 on success (including ASICs without DMUB support), negative
 * errno on failure.
 */
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	/* Instruction constants sit after the PSP header in the ucode image. */
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	/* Hand every reserved framebuffer window to the DMUB service. */
	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	/* Create the DC-side DMUB server once; reused across resumes. */
	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
1160 
1161 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1162 {
1163 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1164 	enum dmub_status status;
1165 	bool init;
1166 
1167 	if (!dmub_srv) {
1168 		/* DMUB isn't supported on the ASIC. */
1169 		return;
1170 	}
1171 
1172 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1173 	if (status != DMUB_STATUS_OK)
1174 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1175 
1176 	if (status == DMUB_STATUS_OK && init) {
1177 		/* Wait for firmware load to finish. */
1178 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1179 		if (status != DMUB_STATUS_OK)
1180 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1181 	} else {
1182 		/* Perform the full hardware initialization. */
1183 		dm_dmub_hw_init(adev);
1184 	}
1185 }
1186 
/*
 * Snapshot the MMHUB system-aperture and GART configuration into @pa_config
 * so DC can program the display HUBs with the same physical address view
 * (needed for system-memory scanout on APUs).
 */
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	/* Aperture bounds are expressed in 256KB (>> 18) units. */
	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	/* The AGP window is expressed in 16MB (>> 24) units. */
	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;


	/* GART addresses split into a 4-bit high part (bits 47:44) and a
	 * 32-bit low part of 4KB page frame numbers (>> 12). */
	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	/* Expand the shifted values back to byte addresses for DC. */
	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}
1241 
/*
 * Deferred vblank enable/disable handling. Tracks how many CRTCs currently
 * have vblank interrupts active so idle optimizations (MALL) are only
 * allowed when none do, and gates PSR entry/exit on the OS's vblank
 * requirements. Consumes (frees) the queued work item.
 */
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if(dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	/* Idle optimizations are only safe once no CRTC needs vblank IRQs. */
	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			/* Vblank is in use, so PSR must be inactive. */
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	/* Drop the stream reference taken when this work was queued. */
	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}
1277 
/*
 * Deferred HPD-RX interrupt servicing (DP automated test requests and link
 * loss), offloaded from the interrupt path. Re-detects the sink first, then
 * handles the pending request under dc_lock. Consumes (frees) the work item.
 */
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	/* Nothing to service if the sink is gone. */
	if (new_connection_type == dc_connection_none)
		goto skip;

	/* GPU reset re-trains links itself; don't interfere. */
	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		/* Allow new link-loss work to be queued for this wq again. */
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}
1326 
1327 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1328 {
1329 	int max_caps = dc->caps.max_links;
1330 	int i = 0;
1331 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1332 
1333 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1334 
1335 	if (!hpd_rx_offload_wq)
1336 		return NULL;
1337 
1338 
1339 	for (i = 0; i < max_caps; i++) {
1340 		hpd_rx_offload_wq[i].wq =
1341 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1342 
1343 		if (hpd_rx_offload_wq[i].wq == NULL) {
1344 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1345 			return NULL;
1346 		}
1347 
1348 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1349 	}
1350 
1351 	return hpd_rx_offload_wq;
1352 }
1353 
/* PCI identity of a device that needs stutter mode force-disabled. */
struct amdgpu_stutter_quirk {
	u16 chip_vendor;	/* PCI vendor ID */
	u16 chip_device;	/* PCI device ID */
	u16 subsys_vendor;	/* PCI subsystem vendor ID */
	u16 subsys_device;	/* PCI subsystem device ID */
	u8 revision;		/* PCI revision ID */
};
1361 
/* Devices matched by dm_should_disable_stutter(). */
static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	/* All-zero sentinel terminates the list (chip_device == 0). */
	{ 0, 0, 0, 0, 0 },
};
1367 
1368 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1369 {
1370 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1371 
1372 	while (p && p->chip_device != 0) {
1373 		if (pdev->vendor == p->chip_vendor &&
1374 		    pdev->device == p->chip_device &&
1375 		    pdev->subsystem_vendor == p->subsys_vendor &&
1376 		    pdev->subsystem_device == p->subsys_device &&
1377 		    pdev->revision == p->revision) {
1378 			return true;
1379 		}
1380 		++p;
1381 	}
1382 	return false;
1383 }
1384 
/*
 * Main DM bring-up: create the DC core and DMUB hardware state, then the
 * freesync, HDCP and DMUB-notification plumbing, and finally register the
 * DRM/KMS device state. Returns 0 on success, -EINVAL on any failure
 * (after amdgpu_dm_fini() tears down whatever was already initialized).
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
	spin_lock_init(&adev->dm.vblank_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	/* Describe the ASIC to DC. */
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* DMCU stays enabled only for specific Renoir firmware revisions. */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		switch (adev->dm.dmcub_fw_version) {
		case 0: /* development */
		case 0x1: /* linux-firmware.git hash 6d9f399 */
		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
			init_data.flags.disable_dmcu = false;
			break;
		default:
			init_data.flags.disable_dmcu = true;
		}
		break;
	case IP_VERSION(2, 0, 3):
		init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	/* APUs that can scan out of system memory get gpu_vm_support. */
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			/* enable S/G on PCO and RV2 */
			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
			    (adev->apu_flags & AMD_APU_IS_PICASSO))
				init_data.flags.gpu_vm_support = true;
			break;
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			init_data.flags.gpu_vm_support = true;
			break;
		default:
			break;
		}
		break;
	}

	if (init_data.flags.gpu_vm_support)
		adev->mode_info.gpu_vm_support = true;

	/* Translate user feature-mask module parameters into DC init flags. */
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;

	init_data.flags.seamless_boot_edp_requested = false;

	if (check_seamless_boot_capability(adev)) {
		init_data.flags.seamless_boot_edp_requested = true;
		init_data.flags.allow_seamless_boot_optimization = true;
		DRM_INFO("Seamless boot condition check passed\n");
	}

	init_data.flags.enable_mipi_converter_optimization = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	/* Apply user debug-mask module parameters to the new DC instance. */
	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
		adev->dm.dc->debug.disable_dsc = true;
		adev->dm.dc->debug.disable_dsc_edp = true;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
	if (!adev->dm.hpd_rx_offload_wq) {
		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
		goto error;
	}

	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
	}

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	/* Set up AUX-reply/HPD notification delivery via the DMUB outbox. */
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: fail to register dmub aux callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}


	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
1659 
/* IP-block early_fini hook: tear down the audio component first. */
static int amdgpu_dm_early_fini(void *handle)
{
	amdgpu_dm_audio_fini((struct amdgpu_device *)handle);

	return 0;
}
1668 
1669 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1670 {
1671 	int i;
1672 
1673 	if (adev->dm.vblank_control_workqueue) {
1674 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1675 		adev->dm.vblank_control_workqueue = NULL;
1676 	}
1677 
1678 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1679 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1680 	}
1681 
1682 	amdgpu_dm_destroy_drm_device(&adev->dm);
1683 
1684 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1685 	if (adev->dm.crc_rd_wrk) {
1686 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1687 		kfree(adev->dm.crc_rd_wrk);
1688 		adev->dm.crc_rd_wrk = NULL;
1689 	}
1690 #endif
1691 #ifdef CONFIG_DRM_AMD_DC_HDCP
1692 	if (adev->dm.hdcp_workqueue) {
1693 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1694 		adev->dm.hdcp_workqueue = NULL;
1695 	}
1696 
1697 	if (adev->dm.dc)
1698 		dc_deinit_callbacks(adev->dm.dc);
1699 #endif
1700 
1701 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1702 
1703 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1704 		kfree(adev->dm.dmub_notify);
1705 		adev->dm.dmub_notify = NULL;
1706 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1707 		adev->dm.delayed_hpd_wq = NULL;
1708 	}
1709 
1710 	if (adev->dm.dmub_bo)
1711 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1712 				      &adev->dm.dmub_bo_gpu_addr,
1713 				      &adev->dm.dmub_bo_cpu_addr);
1714 
1715 	if (adev->dm.hpd_rx_offload_wq) {
1716 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1717 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1718 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1719 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1720 			}
1721 		}
1722 
1723 		kfree(adev->dm.hpd_rx_offload_wq);
1724 		adev->dm.hpd_rx_offload_wq = NULL;
1725 	}
1726 
1727 	/* DC Destroy TODO: Replace destroy DAL */
1728 	if (adev->dm.dc)
1729 		dc_destroy(&adev->dm.dc);
1730 	/*
1731 	 * TODO: pageflip, vlank interrupt
1732 	 *
1733 	 * amdgpu_dm_irq_fini(adev);
1734 	 */
1735 
1736 	if (adev->dm.cgs_device) {
1737 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1738 		adev->dm.cgs_device = NULL;
1739 	}
1740 	if (adev->dm.freesync_module) {
1741 		mod_freesync_destroy(adev->dm.freesync_module);
1742 		adev->dm.freesync_module = NULL;
1743 	}
1744 
1745 	mutex_destroy(&adev->dm.audio_lock);
1746 	mutex_destroy(&adev->dm.dc_lock);
1747 
1748 	return;
1749 }
1750 
/*
 * Request and register the DMCU firmware for ASICs that use it. Returns 0
 * when DMCU firmware isn't required or simply isn't present (not an error),
 * and negative errno only for real failures (unsupported ASIC, firmware
 * load/validation error).
 */
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch(adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		/* Pre-DMCU ASICs: nothing to load. */
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		/* DCN 2.x/3.x parts use DMUB instead of DMCU. */
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			return 0;
		default:
			break;
		}
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	/* Register the ERAM and INTV firmware pieces with the PSP loader. */
	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
1858 
/* DMUB register-read callback: forward to DC's register-access context. */
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}
1865 
1866 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1867 				     uint32_t value)
1868 {
1869 	struct amdgpu_device *adev = ctx;
1870 
1871 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1872 }
1873 
/*
 * dm_dmub_sw_init() - software bring-up of the DMUB service.
 *
 * Selects the DMUB firmware/ASIC enum for the detected DCE IP version,
 * requests and validates the firmware, creates the DMUB service object,
 * sizes its memory regions and backs them with a VRAM buffer object.
 *
 * Allocations made here (dm.dmub_fw, dm.dmub_srv, dm.dmub_bo,
 * dm.dmub_fb_info) are released in dm_sw_fini(), including on the error
 * paths below that return without cleaning up locally.
 *
 * Returns 0 for ASICs without DMUB support, 0 or a negative errno
 * otherwise.
 */
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	/* Map the DCE IP version to a DMUB ASIC enum and firmware file. */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		/* Green Sardine shares the DCN21 block but has its own fw. */
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case IP_VERSION(3, 0, 0):
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		} else {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		}
		break;
	case IP_VERSION(3, 0, 1):
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case IP_VERSION(3, 0, 2):
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case IP_VERSION(3, 0, 3):
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;
	case IP_VERSION(3, 1, 5):
		dmub_asic = DMUB_ASIC_DCN315;
		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
		break;
	case IP_VERSION(3, 1, 6):
		dmub_asic = DMUB_ASIC_DCN316;
		fw_name_dmub = FIRMWARE_DCN316_DMUB;
		break;
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	/* NOTE(review): firmware load/validate failures below return 0,
	 * presumably so the driver can still come up without DMUB —
	 * confirm this is intended for ASICs where DMUB is mandatory.
	 */
	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	/* When the PSP loads the ucode, register it with the base driver's
	 * firmware list so it is accounted for and uploaded.
	 */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}


	/* Freed in dm_sw_fini() (also on the error paths below). */
	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	/* The PSP header/footer wrap the instruction constants and are not
	 * part of the region DMUB executes from.
	 */
	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
2042 
/*
 * sw_init IP hook: bring up the DMUB service first, then request the
 * (optional) DMCU firmware.
 */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = dm_dmub_sw_init(adev);

	if (r)
		return r;

	return load_dmcu_fw(adev);
}
2054 
2055 static int dm_sw_fini(void *handle)
2056 {
2057 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2058 
2059 	kfree(adev->dm.dmub_fb_info);
2060 	adev->dm.dmub_fb_info = NULL;
2061 
2062 	if (adev->dm.dmub_srv) {
2063 		dmub_srv_destroy(adev->dm.dmub_srv);
2064 		adev->dm.dmub_srv = NULL;
2065 	}
2066 
2067 	release_firmware(adev->dm.dmub_fw);
2068 	adev->dm.dmub_fw = NULL;
2069 
2070 	release_firmware(adev->dm.fw_dmcu);
2071 	adev->dm.fw_dmcu = NULL;
2072 
2073 	return 0;
2074 }
2075 
2076 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2077 {
2078 	struct amdgpu_dm_connector *aconnector;
2079 	struct drm_connector *connector;
2080 	struct drm_connector_list_iter iter;
2081 	int ret = 0;
2082 
2083 	drm_connector_list_iter_begin(dev, &iter);
2084 	drm_for_each_connector_iter(connector, &iter) {
2085 		aconnector = to_amdgpu_dm_connector(connector);
2086 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2087 		    aconnector->mst_mgr.aux) {
2088 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2089 					 aconnector,
2090 					 aconnector->base.base.id);
2091 
2092 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2093 			if (ret < 0) {
2094 				DRM_ERROR("DM_MST: Failed to start MST\n");
2095 				aconnector->dc_link->type =
2096 					dc_connection_single;
2097 				break;
2098 			}
2099 		}
2100 	}
2101 	drm_connector_list_iter_end(&iter);
2102 
2103 	return ret;
2104 }
2105 
/*
 * late_init IP hook: program the ABM (adaptive backlight) configuration —
 * via DMCU IRAM on ASICs that have a DMCU, or via DMUB per eDP panel on
 * newer ASICs — then kick off MST link detection.
 */
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;

	dmcu = adev->dm.dc->res_pool->dmcu;

	/* Identity backlight LUT: 16 evenly spaced points from 0 to 0xFFFF. */
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	/* NOTE(review): only the fields below are initialized; any other
	 * members of dmcu_iram_parameters hold stack garbage — confirm the
	 * consumers ignore them.
	 */
	params.set = 0;
	params.backlight_ramping_override = false;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction,  Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on DMCUB,
	 * the dmcu object will be NULL.
	 * ABM 2.4 and up are implemented on DMCUB.
	 */
	if (dmcu) {
		if (!dmcu_load_iram(dmcu, params))
			return -EINVAL;
	} else if (adev->dm.dc->ctx->dmub_srv) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		int edp_num;

		/* Program the ABM config for every detected eDP panel. */
		get_edp_links(adev->dm.dc, edp_links, &edp_num);
		for (i = 0; i < edp_num; i++) {
			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
				return -EINVAL;
		}
	}

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
2151 
2152 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2153 {
2154 	struct amdgpu_dm_connector *aconnector;
2155 	struct drm_connector *connector;
2156 	struct drm_connector_list_iter iter;
2157 	struct drm_dp_mst_topology_mgr *mgr;
2158 	int ret;
2159 	bool need_hotplug = false;
2160 
2161 	drm_connector_list_iter_begin(dev, &iter);
2162 	drm_for_each_connector_iter(connector, &iter) {
2163 		aconnector = to_amdgpu_dm_connector(connector);
2164 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2165 		    aconnector->mst_port)
2166 			continue;
2167 
2168 		mgr = &aconnector->mst_mgr;
2169 
2170 		if (suspend) {
2171 			drm_dp_mst_topology_mgr_suspend(mgr);
2172 		} else {
2173 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2174 			if (ret < 0) {
2175 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2176 				need_hotplug = true;
2177 			}
2178 		}
2179 	}
2180 	drm_connector_list_iter_end(&iter);
2181 
2182 	if (need_hotplug)
2183 		drm_kms_helper_hotplug_event(dev);
2184 }
2185 
2186 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2187 {
2188 	int ret = 0;
2189 
2190 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
2191 	 * on window driver dc implementation.
2192 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2193 	 * should be passed to smu during boot up and resume from s3.
2194 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
2195 	 * dcn20_resource_construct
2196 	 * then call pplib functions below to pass the settings to smu:
2197 	 * smu_set_watermarks_for_clock_ranges
2198 	 * smu_set_watermarks_table
2199 	 * navi10_set_watermarks_table
2200 	 * smu_write_watermarks_table
2201 	 *
2202 	 * For Renoir, clock settings of dcn watermark are also fixed values.
2203 	 * dc has implemented different flow for window driver:
2204 	 * dc_hardware_init / dc_set_power_state
2205 	 * dcn10_init_hw
2206 	 * notify_wm_ranges
2207 	 * set_wm_ranges
2208 	 * -- Linux
2209 	 * smu_set_watermarks_for_clock_ranges
2210 	 * renoir_set_watermarks_table
2211 	 * smu_write_watermarks_table
2212 	 *
2213 	 * For Linux,
2214 	 * dc_hardware_init -> amdgpu_dm_init
2215 	 * dc_set_power_state --> dm_resume
2216 	 *
2217 	 * therefore, this function apply to navi10/12/14 but not Renoir
2218 	 * *
2219 	 */
2220 	switch (adev->ip_versions[DCE_HWIP][0]) {
2221 	case IP_VERSION(2, 0, 2):
2222 	case IP_VERSION(2, 0, 0):
2223 		break;
2224 	default:
2225 		return 0;
2226 	}
2227 
2228 	ret = amdgpu_dpm_write_watermarks_table(adev);
2229 	if (ret) {
2230 		DRM_ERROR("Failed to update WMTABLE!\n");
2231 		return ret;
2232 	}
2233 
2234 	return 0;
2235 }
2236 
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 *
 * Return: 0 on success, or the error code from amdgpu_dm_init() (previously
 * ignored, which left a half-initialized DM looking healthy).
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* Create DAL display manager */
	r = amdgpu_dm_init(adev);
	if (r)
		return r;
	amdgpu_dm_hpd_init(adev);

	return 0;
}
2266 
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 *
 * Return: always 0.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Order matters: stop HPD sources first, then IRQ handling, then DM. */
	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
2285 
2286 
2287 static int dm_enable_vblank(struct drm_crtc *crtc);
2288 static void dm_disable_vblank(struct drm_crtc *crtc);
2289 
/*
 * Enable or disable per-CRTC interrupts (pflip and vblank) for every
 * stream in @state that still has planes. Used around GPU reset to
 * quiesce and restore display interrupts.
 */
static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
				 struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		/* Skip CRTCs with no planes: nothing to flip or scan out. */
		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			/* NOTE(review): the debug text says "vupdate irq" but
			 * the source toggled above is pflip — confirm which
			 * was intended.
			 */
			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}

		}
	}

}
2323 
2324 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2325 {
2326 	struct dc_state *context = NULL;
2327 	enum dc_status res = DC_ERROR_UNEXPECTED;
2328 	int i;
2329 	struct dc_stream_state *del_streams[MAX_PIPES];
2330 	int del_streams_count = 0;
2331 
2332 	memset(del_streams, 0, sizeof(del_streams));
2333 
2334 	context = dc_create_state(dc);
2335 	if (context == NULL)
2336 		goto context_alloc_fail;
2337 
2338 	dc_resource_state_copy_construct_current(dc, context);
2339 
2340 	/* First remove from context all streams */
2341 	for (i = 0; i < context->stream_count; i++) {
2342 		struct dc_stream_state *stream = context->streams[i];
2343 
2344 		del_streams[del_streams_count++] = stream;
2345 	}
2346 
2347 	/* Remove all planes for removed streams and then remove the streams */
2348 	for (i = 0; i < del_streams_count; i++) {
2349 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2350 			res = DC_FAIL_DETACH_SURFACES;
2351 			goto fail;
2352 		}
2353 
2354 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2355 		if (res != DC_OK)
2356 			goto fail;
2357 	}
2358 
2359 	res = dc_commit_state(dc, context);
2360 
2361 fail:
2362 	dc_release_state(context);
2363 
2364 context_alloc_fail:
2365 	return res;
2366 }
2367 
2368 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2369 {
2370 	int i;
2371 
2372 	if (dm->hpd_rx_offload_wq) {
2373 		for (i = 0; i < dm->dc->caps.max_links; i++)
2374 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2375 	}
2376 }
2377 
/*
 * suspend IP hook. Two distinct paths:
 *
 * - GPU reset: cache the current DC state, disable display interrupts,
 *   commit zero streams and quiesce IRQ/HPD work. dc_lock is
 *   INTENTIONALLY left held on return — dm_resume() releases it after
 *   restoring the cached state.
 *
 * - Regular S3: cache the atomic state via the DRM helper, suspend MST,
 *   IRQs and HPD work, then put DC into D3.
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		/* Held across the reset; unlocked in dm_resume(). */
		mutex_lock(&dm->dc_lock);

		dc_allow_idle_optimizations(adev->dm.dc, false);

		/* Released in dm_resume() after it is re-committed. */
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		hpd_rx_irq_work_suspend(dm);

		return ret;
	}

	/* cached_state must be NULL here; dm_resume() consumes and clears it. */
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	hpd_rx_irq_work_suspend(dm);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}
2415 
2416 struct amdgpu_dm_connector *
2417 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2418 					     struct drm_crtc *crtc)
2419 {
2420 	uint32_t i;
2421 	struct drm_connector_state *new_con_state;
2422 	struct drm_connector *connector;
2423 	struct drm_crtc *crtc_from_state;
2424 
2425 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2426 		crtc_from_state = new_con_state->crtc;
2427 
2428 		if (crtc_from_state == crtc)
2429 			return to_amdgpu_dm_connector(connector);
2430 	}
2431 
2432 	return NULL;
2433 }
2434 
2435 static void emulated_link_detect(struct dc_link *link)
2436 {
2437 	struct dc_sink_init_data sink_init_data = { 0 };
2438 	struct display_sink_capability sink_caps = { 0 };
2439 	enum dc_edid_status edid_status;
2440 	struct dc_context *dc_ctx = link->ctx;
2441 	struct dc_sink *sink = NULL;
2442 	struct dc_sink *prev_sink = NULL;
2443 
2444 	link->type = dc_connection_none;
2445 	prev_sink = link->local_sink;
2446 
2447 	if (prev_sink)
2448 		dc_sink_release(prev_sink);
2449 
2450 	switch (link->connector_signal) {
2451 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2452 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2453 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2454 		break;
2455 	}
2456 
2457 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2458 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2459 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2460 		break;
2461 	}
2462 
2463 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2464 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2465 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2466 		break;
2467 	}
2468 
2469 	case SIGNAL_TYPE_LVDS: {
2470 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2471 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2472 		break;
2473 	}
2474 
2475 	case SIGNAL_TYPE_EDP: {
2476 		sink_caps.transaction_type =
2477 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2478 		sink_caps.signal = SIGNAL_TYPE_EDP;
2479 		break;
2480 	}
2481 
2482 	case SIGNAL_TYPE_DISPLAY_PORT: {
2483 		sink_caps.transaction_type =
2484 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2485 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2486 		break;
2487 	}
2488 
2489 	default:
2490 		DC_ERROR("Invalid connector type! signal:%d\n",
2491 			link->connector_signal);
2492 		return;
2493 	}
2494 
2495 	sink_init_data.link = link;
2496 	sink_init_data.sink_signal = sink_caps.signal;
2497 
2498 	sink = dc_sink_create(&sink_init_data);
2499 	if (!sink) {
2500 		DC_ERROR("Failed to create sink!\n");
2501 		return;
2502 	}
2503 
2504 	/* dc_sink_create returns a new reference */
2505 	link->local_sink = sink;
2506 
2507 	edid_status = dm_helpers_read_local_edid(
2508 			link->ctx,
2509 			link,
2510 			sink);
2511 
2512 	if (edid_status != EDID_OK)
2513 		DC_ERROR("Failed to read EDID");
2514 
2515 }
2516 
/*
 * Re-commit the cached DC state after a GPU reset, forcing a full update
 * on every surface so the hardware is fully reprogrammed.
 */
static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	/* Heap-allocated because this bundle is too large for the stack. */
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} * bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		/* NOTE(review): `stream_status` (== stream_status[0]) is used
		 * for every stream k below; confirm stream_status[k] was not
		 * intended for multi-stream states.
		 */
		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}
2556 
2557 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2558 {
2559 	struct dc_stream_state *stream_state;
2560 	struct amdgpu_dm_connector *aconnector = link->priv;
2561 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2562 	struct dc_stream_update stream_update;
2563 	bool dpms_off = true;
2564 
2565 	memset(&stream_update, 0, sizeof(stream_update));
2566 	stream_update.dpms_off = &dpms_off;
2567 
2568 	mutex_lock(&adev->dm.dc_lock);
2569 	stream_state = dc_stream_find_from_link(link);
2570 
2571 	if (stream_state == NULL) {
2572 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2573 		mutex_unlock(&adev->dm.dc_lock);
2574 		return;
2575 	}
2576 
2577 	stream_update.stream = stream_state;
2578 	acrtc_state->force_dpms_off = true;
2579 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2580 				     stream_state, &stream_update,
2581 				     stream_state->ctx->dc->current_state);
2582 	mutex_unlock(&adev->dm.dc_lock);
2583 }
2584 
/*
 * resume IP hook. Mirrors dm_suspend(): a GPU-reset path that re-commits
 * the cached DC state (and releases the dc_lock taken in dm_suspend()),
 * and a regular S3 path that rebuilds the DC state, re-detects every
 * connector and replays the cached atomic state.
 */
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		/*
		 * The dc->current_state is backed up into dm->cached_dc_state
		 * before we commit 0 streams.
		 *
		 * DC will clear link encoder assignments on the real state
		 * but the changes won't propagate over to the copy we made
		 * before the 0 streams commit.
		 *
		 * DC expects that link encoder assignments are *not* valid
		 * when committing a state, so as a workaround we can copy
		 * off of the current state.
		 *
		 * We lose the previous assignments, but we had already
		 * commit 0 streams anyway.
		 */
		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);

		if (dc_enable_dmub_notifications(adev->dm.dc))
			amdgpu_dm_outbox_init(adev);

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		/* Force a full update on every stream and plane so the
		 * hardware is completely reprogrammed on commit.
		 */
		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		/* Pairs with the mutex_lock() taken in dm_suspend() on the
		 * reset path.
		 */
		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Re-enable outbox interrupts for DPIA. */
	if (dc_enable_dmub_notifications(adev->dm.dc))
		amdgpu_dm_outbox_init(adev);

	/* Before powering on DC we need to re-initialize DMUB. */
	dm_dmub_hw_resume(adev);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST*/
	s3_handle_mst(ddev, false);

	/* Do detection*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->dc_link &&
		    aconnector->dc_link->type == dc_connection_mst_branch)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
2757 
2758 /**
2759  * DOC: DM Lifecycle
2760  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2762  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2763  * the base driver's device list to be initialized and torn down accordingly.
2764  *
2765  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2766  */
2767 
/* IP-block hooks through which the amdgpu base driver drives DM's lifecycle. */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.early_fini = amdgpu_dm_early_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
2786 
/* Registers DM as the DCE IP block (v1.0.0) with the amdgpu base driver. */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
2795 
2796 
2797 /**
2798  * DOC: atomic
2799  *
2800  * *WIP*
2801  */
2802 
/* DRM mode-config callbacks: FB creation plus DM's atomic check/commit. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};
2810 
/* Routes the atomic commit tail into DM's own implementation. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
2814 
/*
 * Refresh the backlight capability entry of an eDP connector: decide
 * whether AUX backlight control is used and derive the min/max input
 * signal levels from the sink's HDR metadata (max_cll/min_cll).
 * No-op for non-eDP links or links without a backlight entry.
 */
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	/* 50*2**(r/32) for r in 0..31 — see the derivation comment below. */
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	int i;

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	/* Find which backlight slot this eDP link owns. */
	for (i = 0; i < dm->num_of_edps; i++) {
		if (link == dm->backlight_link[i])
			break;
	}
	if (i >= dm->num_of_edps)
		return;
	caps = &dm->backlight_caps[i];
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 /*||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
		caps->aux_support = true;

	/* Module parameter overrides the sink-capability decision above. */
	if (amdgpu_backlight == 0)
		caps->aux_support = false;
	else if (amdgpu_backlight == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need float point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From the Euclids division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * We just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	/* NOTE(review): DIV_ROUND_CLOSEST(min_cll, 255) can only yield 0 or 1
	 * for a one-byte CV, and DIV_ROUND_CLOSEST(1, 100) is 0, so `min`
	 * ends up 0 for any min_cll — confirm whether a higher-precision
	 * form of (CV/255)^2/100 was intended.
	 */
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
2886 
/*
 * Synchronize the DRM connector with the result of the most recent dc_link
 * detection: transfer dc_sink ownership, refresh the EDID property,
 * freesync caps, extended backlight caps and CEC state.
 *
 * MST root connectors are skipped — the DRM MST framework manages their
 * sinks.  The local reference taken on @sink at the top is dropped on every
 * exit path.
 */
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			/* No physical sink: fall back to the emulated one. */
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		/* Drop the local reference taken at the top. */
		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			dc_sink_release(aconnector->dc_sink);
		}

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			/* Sink without an EDID: clear cached EDID and CEC. */
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		drm_connector_update_edid_property(connector, aconnector->edid);
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		/* Disconnect: tear down CEC, freesync, EDID and the sink. */
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	/* Drop the local reference taken at the top. */
	if (sink)
		dc_sink_release(sink);
}
3028 
/*
 * Handle a long HPD pulse (connect/disconnect) for one connector: rerun
 * link detection, update the connector state and notify userspace via a
 * hotplug event.  Runs in low-IRQ (work) context under hpd_lock.
 */
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
	struct dm_crtc_state *dm_crtc_state = NULL;

	if (adev->dm.disable_hpd_irq)
		return;

	/* NOTE(review): drm_atomic_get_crtc_state() normally requires the
	 * CRTC's modeset lock to be held for the given atomic state —
	 * confirm the locking contract on this path. */
	if (dm_con_state->base.state && dm_con_state->base.crtc)
		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
					dm_con_state->base.state,
					dm_con_state->base.crtc));
	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Reset HDCP for this display so it can renegotiate after replug. */
	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	/* Forced connector with nothing attached: emulate the link. */
	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_connector_hotplug_event(connector);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		/* Unplug with an active CRTC: force DPMS off before teardown. */
		if (new_connection_type == dc_connection_none &&
		    aconnector->dc_link->type == dc_connection_none &&
		    dm_crtc_state)
			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_connector_hotplug_event(connector);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
3091 
/*
 * HPD interrupt callback registered through
 * amdgpu_dm_irq_register_interrupt(); @param is the connector cookie
 * supplied at registration time.
 */
static void handle_hpd_irq(void *param)
{
	handle_hpd_irq_helper(param);
}
3099 
/*
 * Service MST sideband messages signalled via a DP short pulse: poll the
 * sink's ESI (or legacy 0x200) DPCD registers, hand each pending IRQ to
 * the DRM MST helper, ACK it back to the sink, and repeat until no new
 * IRQ is raised or max_process_count iterations have run.
 */
static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	/* Bound the poll loop in case the sink keeps raising IRQs. */
	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	/* Pre-DP1.2 sinks expose the IRQ vector at 0x200, DP1.2+ at the
	 * event-status-indicator (ESI) block at 0x2002. */
	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			/* Retry the ACK write a few times; AUX can be flaky. */
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
3177 
3178 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3179 							union hpd_irq_data hpd_irq_data)
3180 {
3181 	struct hpd_rx_irq_offload_work *offload_work =
3182 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3183 
3184 	if (!offload_work) {
3185 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3186 		return;
3187 	}
3188 
3189 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3190 	offload_work->data = hpd_irq_data;
3191 	offload_work->offload_wq = offload_wq;
3192 
3193 	queue_work(offload_wq->wq, &offload_work->work);
3194 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3195 }
3196 
/*
 * Handle a DP short pulse (HPD RX) for one connector.  Fast cases (MST
 * sideband messages) are serviced inline; automated-test requests and
 * link-loss recovery are pushed to the per-connector offload workqueue.
 * If the downstream port status changed on a non-MST connector, rerun
 * detection and send a hotplug event.
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	bool result = false;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union hpd_irq_data hpd_irq_data;
	bool link_loss = false;
	bool has_left_work = false;
	/* Offload queues are indexed by DRM connector index; see
	 * register_hpd_handlers(). */
	int idx = aconnector->base.index;
	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	mutex_lock(&aconnector->hpd_lock);

	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
						&link_loss, true, &has_left_work);

	/* DC fully handled the IRQ; nothing left for the driver. */
	if (!has_left_work)
		goto out;

	/* DP compliance/automated test requests go to the work queue. */
	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
		goto out;
	}

	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		/* MST sideband traffic is serviced inline. */
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			dm_handle_mst_sideband_msg(aconnector);
			goto out;
		}

		if (link_loss) {
			bool skip = false;

			/* Only one link-loss recovery in flight per queue. */
			spin_lock(&offload_wq->offload_lock);
			skip = offload_wq->is_handling_link_loss;

			if (!skip)
				offload_wq->is_handling_link_loss = true;

			spin_unlock(&offload_wq->offload_lock);

			if (!skip)
				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);

			goto out;
		}
	}

out:
	if (result && !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_connector_hotplug_event(connector);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_connector_hotplug_event(connector);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Content-protection IRQ: let the HDCP worker re-check the link. */
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
	}
#endif

	if (dc_link->type != dc_connection_mst_branch)
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);

	mutex_unlock(&aconnector->hpd_lock);
}
3308 
/*
 * Walk every DRM connector and register the HPD (long pulse) and HPD RX
 * (DP short pulse) interrupt handlers with the DM IRQ service, using the
 * connector as the callback cookie.  Also binds each connector to its
 * hpd_rx offload queue slot (indexed by connector->index).
 */
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head)	{

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source =	dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);

			/* Let handle_hpd_rx_irq() find its offload queue. */
			if (adev->dm.hpd_rx_offload_wq)
				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
					aconnector;
		}
	}
}
3351 
3352 #if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks (DCE 6.x / SI ASICs) */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	/* NOTE(review): source ids here are crtc index + 1, i.e. assumed
	 * 1-based per CRTC on DCE6 — confirm against the SRCID headers. */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i+1 , 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	/* Pageflip source ids for consecutive displays are spaced 2 apart. */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
3433 #endif
3434 
/* Register IRQ sources and initialize IRQ callbacks (DCE 8.x - 12.x) */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	/* Vega and later route display interrupts through the SOC15 DCE
	 * IH client instead of the legacy client. */
	if (adev->family >= AMDGPU_FAMILY_AI)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	/* VUPDATE source ids for consecutive displays are spaced 2 apart. */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	/* Pageflip source ids for consecutive displays are spaced 2 apart. */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
3539 
/* Register IRQ sources and initialize IRQ callbacks (DCN 1.0+) */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* Per-OTG vertical line 0 interrupt source ids, used by the secure
	 * display (CRC region) path; indexed by CRTC number below. */
	static const unsigned int vrtl_int_srcid[] = {
		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
	};
#endif

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use otg vertical line interrupt */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
				vrtl_int_srcid[i], &adev->vline0_irq);

		if (r) {
			DRM_ERROR("Failed to add vline0 irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);

		/* Non-fatal: stop registering vline0 sources but continue
		 * with the rest of the interrupts. */
		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
			break;
		}

		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
					- DC_IRQ_SOURCE_DC1_VLINE0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
	}
#endif

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
/* Register Outbox IRQ sources and initialize IRQ callbacks.
 *
 * The DMUB firmware raises the outbox interrupt to deliver notifications
 * (e.g. AUX replies, HPD from firmware) back to the driver; they are
 * drained by dm_dmub_outbox1_low_irq() in low-IRQ context.
 */
static int register_outbox_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r, i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
			&adev->dmub_outbox_irq);
	if (r) {
		DRM_ERROR("Failed to add outbox irq id!\n");
		return r;
	}

	/* Only hook up the handler when a DMUB service actually exists. */
	if (dc->ctx->dmub_srv) {
		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
		int_params.irq_source =
		dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.dmub_outbox_params[0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dmub_outbox1_low_irq, c_irq_params);
	}

	return 0;
}
3728 
3729 /*
3730  * Acquires the lock for the atomic state object and returns
3731  * the new atomic state.
3732  *
3733  * This should only be called during atomic check.
3734  */
3735 int dm_atomic_get_state(struct drm_atomic_state *state,
3736 			struct dm_atomic_state **dm_state)
3737 {
3738 	struct drm_device *dev = state->dev;
3739 	struct amdgpu_device *adev = drm_to_adev(dev);
3740 	struct amdgpu_display_manager *dm = &adev->dm;
3741 	struct drm_private_state *priv_state;
3742 
3743 	if (*dm_state)
3744 		return 0;
3745 
3746 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3747 	if (IS_ERR(priv_state))
3748 		return PTR_ERR(priv_state);
3749 
3750 	*dm_state = to_dm_atomic_state(priv_state);
3751 
3752 	return 0;
3753 }
3754 
3755 static struct dm_atomic_state *
3756 dm_atomic_get_new_state(struct drm_atomic_state *state)
3757 {
3758 	struct drm_device *dev = state->dev;
3759 	struct amdgpu_device *adev = drm_to_adev(dev);
3760 	struct amdgpu_display_manager *dm = &adev->dm;
3761 	struct drm_private_obj *obj;
3762 	struct drm_private_state *new_obj_state;
3763 	int i;
3764 
3765 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3766 		if (obj->funcs == dm->atomic_obj.funcs)
3767 			return to_dm_atomic_state(new_obj_state);
3768 	}
3769 
3770 	return NULL;
3771 }
3772 
/*
 * drm_private_state_funcs.atomic_duplicate_state callback: duplicate DM's
 * private state, deep-copying the DC validation context so atomic check
 * can mutate it without disturbing the committed state.  Returns NULL on
 * allocation failure.
 */
static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	/* Deep-copy the DC context; dc_copy_state() can fail (OOM). */
	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	/* No context — either the copy failed or the old state had none
	 * (kzalloc left the field NULL); treat both as failure. */
	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}
3796 
static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	/* Drop the DC context reference held by this state before freeing. */
	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}
3807 
3808 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3809 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3810 	.atomic_destroy_state = dm_atomic_destroy_state,
3811 };
3812 
3813 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3814 {
3815 	struct dm_atomic_state *state;
3816 	int r;
3817 
3818 	adev->mode_info.mode_config_initialized = true;
3819 
3820 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3821 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3822 
3823 	adev_to_drm(adev)->mode_config.max_width = 16384;
3824 	adev_to_drm(adev)->mode_config.max_height = 16384;
3825 
3826 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3827 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3828 	/* indicates support for immediate flip */
3829 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3830 
3831 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3832 
3833 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3834 	if (!state)
3835 		return -ENOMEM;
3836 
3837 	state->context = dc_create_state(adev->dm.dc);
3838 	if (!state->context) {
3839 		kfree(state);
3840 		return -ENOMEM;
3841 	}
3842 
3843 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3844 
3845 	drm_atomic_private_obj_init(adev_to_drm(adev),
3846 				    &adev->dm.atomic_obj,
3847 				    &state->base,
3848 				    &dm_atomic_state_funcs);
3849 
3850 	r = amdgpu_display_modeset_create_props(adev);
3851 	if (r) {
3852 		dc_release_state(state->context);
3853 		kfree(state);
3854 		return r;
3855 	}
3856 
3857 	r = amdgpu_dm_audio_init(adev);
3858 	if (r) {
3859 		dc_release_state(state->context);
3860 		kfree(state);
3861 		return r;
3862 	}
3863 
3864 	return 0;
3865 }
3866 
3867 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3868 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3869 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3870 
3871 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3872 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3873 
/*
 * Populate dm->backlight_caps[bl_idx], preferring ACPI-provided limits and
 * falling back to the driver defaults when ACPI caps are unavailable.
 */
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
					    int bl_idx)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	/* Query ACPI only once; the cached caps are reused afterwards. */
	if (dm->backlight_caps[bl_idx].caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(&caps);
	if (caps.caps_valid) {
		dm->backlight_caps[bl_idx].caps_valid = true;
		/* AUX backlights skip the PWM input-signal range below. */
		if (caps.aux_support)
			return;
		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
	} else {
		/* No valid ACPI caps - use driver defaults. */
		dm->backlight_caps[bl_idx].min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps[bl_idx].max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps[bl_idx].aux_support)
		return;

	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
3906 
3907 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3908 				unsigned *min, unsigned *max)
3909 {
3910 	if (!caps)
3911 		return 0;
3912 
3913 	if (caps->aux_support) {
3914 		// Firmware limits are in nits, DC API wants millinits.
3915 		*max = 1000 * caps->aux_max_input_signal;
3916 		*min = 1000 * caps->aux_min_input_signal;
3917 	} else {
3918 		// Firmware limits are 8-bit, PWM control is 16-bit.
3919 		*max = 0x101 * caps->max_input_signal;
3920 		*min = 0x101 * caps->min_input_signal;
3921 	}
3922 	return 1;
3923 }
3924 
3925 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3926 					uint32_t brightness)
3927 {
3928 	unsigned min, max;
3929 
3930 	if (!get_brightness_range(caps, &min, &max))
3931 		return brightness;
3932 
3933 	// Rescale 0..255 to min..max
3934 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3935 				       AMDGPU_MAX_BL_LEVEL);
3936 }
3937 
3938 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3939 				      uint32_t brightness)
3940 {
3941 	unsigned min, max;
3942 
3943 	if (!get_brightness_range(caps, &min, &max))
3944 		return brightness;
3945 
3946 	if (brightness < min)
3947 		return 0;
3948 	// Rescale min..max to 0..255
3949 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3950 				 max - min);
3951 }
3952 
/*
 * Program the backlight level for eDP panel bl_idx, via AUX when the panel
 * supports it and via the DC PWM path otherwise.
 */
static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
					 int bl_idx,
					 u32 user_brightness)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	/* Remember the requested level even if programming fails below. */
	dm->brightness[bl_idx] = user_brightness;
	/* update scratch register */
	if (bl_idx == 0)
		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
	link = (struct dc_link *)dm->backlight_link[bl_idx];

	/* Change brightness based on AUX property */
	if (caps.aux_support) {
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
	} else {
		rc = dc_link_set_backlight_level(link, brightness, 0);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
	}

	/* Track the level the panel actually accepted. */
	if (rc)
		dm->actual_brightness[bl_idx] = user_brightness;
}
3987 
3988 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3989 {
3990 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3991 	int i;
3992 
3993 	for (i = 0; i < dm->num_of_edps; i++) {
3994 		if (bd == dm->backlight_dev[i])
3995 			break;
3996 	}
3997 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3998 		i = 0;
3999 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4000 
4001 	return 0;
4002 }
4003 
4004 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4005 					 int bl_idx)
4006 {
4007 	struct amdgpu_dm_backlight_caps caps;
4008 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4009 
4010 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4011 	caps = dm->backlight_caps[bl_idx];
4012 
4013 	if (caps.aux_support) {
4014 		u32 avg, peak;
4015 		bool rc;
4016 
4017 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4018 		if (!rc)
4019 			return dm->brightness[bl_idx];
4020 		return convert_brightness_to_user(&caps, avg);
4021 	} else {
4022 		int ret = dc_link_get_backlight_level(link);
4023 
4024 		if (ret == DC_ERROR_UNEXPECTED)
4025 			return dm->brightness[bl_idx];
4026 		return convert_brightness_to_user(&caps, ret);
4027 	}
4028 }
4029 
4030 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4031 {
4032 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4033 	int i;
4034 
4035 	for (i = 0; i < dm->num_of_edps; i++) {
4036 		if (bd == dm->backlight_dev[i])
4037 			break;
4038 	}
4039 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4040 		i = 0;
4041 	return amdgpu_dm_backlight_get_level(dm, i);
4042 }
4043 
/* Backlight class operations for the DM-registered backlight devices. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};
4049 
/*
 * Register a backlight class device for the next eDP slot
 * (dm->num_of_edps); the caller bumps num_of_edps on success.
 */
static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
	/* Start at full brightness until userspace requests otherwise. */
	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	/* Name is derived from the primary DRM minor plus the eDP slot. */
	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);

	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
								       adev_to_drm(dm->adev)->dev,
								       dm,
								       &amdgpu_dm_backlight_ops,
								       &props);

	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
4077 #endif
4078 
4079 static int initialize_plane(struct amdgpu_display_manager *dm,
4080 			    struct amdgpu_mode_info *mode_info, int plane_id,
4081 			    enum drm_plane_type plane_type,
4082 			    const struct dc_plane_cap *plane_cap)
4083 {
4084 	struct drm_plane *plane;
4085 	unsigned long possible_crtcs;
4086 	int ret = 0;
4087 
4088 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4089 	if (!plane) {
4090 		DRM_ERROR("KMS: Failed to allocate plane\n");
4091 		return -ENOMEM;
4092 	}
4093 	plane->type = plane_type;
4094 
4095 	/*
4096 	 * HACK: IGT tests expect that the primary plane for a CRTC
4097 	 * can only have one possible CRTC. Only expose support for
4098 	 * any CRTC if they're not going to be used as a primary plane
4099 	 * for a CRTC - like overlay or underlay planes.
4100 	 */
4101 	possible_crtcs = 1 << plane_id;
4102 	if (plane_id >= dm->dc->caps.max_streams)
4103 		possible_crtcs = 0xff;
4104 
4105 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4106 
4107 	if (ret) {
4108 		DRM_ERROR("KMS: Failed to initialize plane\n");
4109 		kfree(plane);
4110 		return ret;
4111 	}
4112 
4113 	if (mode_info)
4114 		mode_info->planes[plane_id] = plane;
4115 
4116 	return ret;
4117 }
4118 
4119 
/*
 * Register a backlight device for @link if it is a connected internal
 * panel (eDP/LVDS); on success the link claims the next eDP slot.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		if (!dm->backlight_dev[dm->num_of_edps])
			amdgpu_dm_register_backlight_device(dm);

		/* Claim the slot only if registration actually succeeded. */
		if (dm->backlight_dev[dm->num_of_edps]) {
			dm->backlight_link[dm->num_of_edps] = link;
			dm->num_of_edps++;
		}
	}
#endif
}
4143 
4144 
4145 /*
4146  * In this architecture, the association
4147  * connector -> encoder -> crtc
4148  * id not really requried. The crtc and connector will hold the
4149  * display_index as an abstraction to use with DAL component
4150  *
4151  * Returns 0 on success
4152  */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;
	bool psr_feature_enabled = false;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	/* One CRTC per stream, each bound to its primary plane. */
	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* Use Outbox interrupt */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 5):
	case IP_VERSION(3, 1, 6):
	case IP_VERSION(2, 1, 0):
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
			      adev->ip_versions[DCE_HWIP][0]);
	}

	/* Determine whether to enable PSR support by default. */
	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			psr_feature_enabled = true;
			break;
		default:
			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
			break;
		}
	}

	/* Disable vblank IRQs aggressively for power-saving. */
	adev_to_drm(adev)->vblank_disable_immediate = true;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced-on connectors with nothing attached get an emulated link. */
		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (dm->num_of_edps)
				update_connector_ext_caps(aconnector);
			if (psr_feature_enabled)
				amdgpu_dm_set_psr_caps(link);

			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
			 * PSR is also supported.
			 */
			if (link->psr_settings.psr_feature_enabled)
				adev_to_drm(adev)->vblank_disable_immediate = false;
		}


	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			if (dcn10_register_irq_handlers(dm->adev)) {
				DRM_ERROR("DM: Failed to initialize IRQ\n");
				goto fail;
			}
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
					adev->ip_versions[DCE_HWIP][0]);
			goto fail;
		}
		break;
	}

	return 0;
fail:
	/* kfree(NULL) is a no-op, so only partially-constructed objects
	 * from the failing loop iteration are freed here.
	 */
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}
4393 
4394 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4395 {
4396 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4397 	return;
4398 }
4399 
4400 /******************************************************************************
4401  * amdgpu_display_funcs functions
4402  *****************************************************************************/
4403 
4404 /*
4405  * dm_bandwidth_update - program display watermarks
4406  *
4407  * @adev: amdgpu_device pointer
4408  *
4409  * Calculate and program the display watermarks and line buffer allocation.
4410  */
4411 static void dm_bandwidth_update(struct amdgpu_device *adev)
4412 {
4413 	/* TODO: implement later */
4414 }
4415 
/* amdgpu display-core hooks; most duties are handled by DC/DAL instead. */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
4429 
4430 #if defined(CONFIG_DEBUG_KERNEL_DC)
4431 
4432 static ssize_t s3_debug_store(struct device *device,
4433 			      struct device_attribute *attr,
4434 			      const char *buf,
4435 			      size_t count)
4436 {
4437 	int ret;
4438 	int s3_state;
4439 	struct drm_device *drm_dev = dev_get_drvdata(device);
4440 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4441 
4442 	ret = kstrtoint(buf, 0, &s3_state);
4443 
4444 	if (ret == 0) {
4445 		if (s3_state) {
4446 			dm_resume(adev);
4447 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4448 		} else
4449 			dm_suspend(adev);
4450 	}
4451 
4452 	return ret == 0 ? count : 0;
4453 }
4454 
4455 DEVICE_ATTR_WO(s3_debug);
4456 
4457 #endif
4458 
/*
 * IP-block early_init: set the per-ASIC CRTC/HPD/DIG counts, install the
 * IRQ and display function tables, and (optionally) create the s3 debug
 * sysfs file.  Newer parts are keyed off the DCE HW IP version instead of
 * the ASIC type.
 */
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:

		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(3, 0, 0):
			adev->mode_info.num_crtc = 6;
			adev->mode_info.num_hpd = 6;
			adev->mode_info.num_dig = 6;
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(3, 0, 2):
			adev->mode_info.num_crtc = 5;
			adev->mode_info.num_hpd = 5;
			adev->mode_info.num_dig = 5;
			break;
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(3, 0, 3):
			adev->mode_info.num_crtc = 2;
			adev->mode_info.num_hpd = 2;
			adev->mode_info.num_dig = 2;
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			adev->mode_info.num_crtc = 4;
			adev->mode_info.num_hpd = 4;
			adev->mode_info.num_dig = 4;
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
					adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
		break;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
4589 
/*
 * True when the CRTC stays active across this commit and DRM flagged it as
 * needing a full modeset.  The stream arguments are currently unused.
 */
static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}
4596 
4597 static bool modereset_required(struct drm_crtc_state *crtc_state)
4598 {
4599 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4600 }
4601 
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	/* Release DRM-core bookkeeping before freeing the embedding object. */
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
4607 
/* Encoder vtable: encoders are kzalloc'ed, so .destroy also frees them. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
4611 
4612 
/*
 * Look up the per-format DC scaling limits for @fb, in 1/1000 units
 * (1000 == 1.0 scaling).
 */
static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}
4654 
4655 
/*
 * Translate DRM plane state into a DC scaling_info, validating the source
 * and destination rectangles and the per-format scaling limits.
 * Returns 0 on success, -EINVAL for degenerate rects or out-of-range scaling.
 */
static int fill_dc_scaling_info(struct amdgpu_device *adev,
				const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang on DCN1x.
	 * To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		/* No plane/fb context: fall back to fixed limits
		 * (1/1000 units: 0.25x .. 16x).
		 */
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
4738 
/* Decode GFX8 tiling parameters out of the BO's AMDGPU_TILING flag word. */
static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	/* pipe_config is always taken from the flags, regardless of array mode. */
	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}
4771 
/* Seed GFX9+ tiling info from the device-wide gb_addr_config fields. */
static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	/* num_pkrs only exists on GC 10.3+. */
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}
4792 
4793 static int
4794 validate_dcc(struct amdgpu_device *adev,
4795 	     const enum surface_pixel_format format,
4796 	     const enum dc_rotation_angle rotation,
4797 	     const union dc_tiling_info *tiling_info,
4798 	     const struct dc_plane_dcc_param *dcc,
4799 	     const struct dc_plane_address *address,
4800 	     const struct plane_size *plane_size)
4801 {
4802 	struct dc *dc = adev->dm.dc;
4803 	struct dc_dcc_surface_param input;
4804 	struct dc_surface_dcc_cap output;
4805 
4806 	memset(&input, 0, sizeof(input));
4807 	memset(&output, 0, sizeof(output));
4808 
4809 	if (!dcc->enable)
4810 		return 0;
4811 
4812 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4813 	    !dc->cap_funcs.get_dcc_compression_cap)
4814 		return -EINVAL;
4815 
4816 	input.format = format;
4817 	input.surface_size.width = plane_size->surface_size.width;
4818 	input.surface_size.height = plane_size->surface_size.height;
4819 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4820 
4821 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4822 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4823 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4824 		input.scan = SCAN_DIRECTION_VERTICAL;
4825 
4826 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4827 		return -EINVAL;
4828 
4829 	if (!output.capable)
4830 		return -EINVAL;
4831 
4832 	if (dcc->independent_64b_blks == 0 &&
4833 	    output.grph.rgb.independent_64b_blks != 0)
4834 		return -EINVAL;
4835 
4836 	return 0;
4837 }
4838 
4839 static bool
4840 modifier_has_dcc(uint64_t modifier)
4841 {
4842 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4843 }
4844 
4845 static unsigned
4846 modifier_gfx9_swizzle_mode(uint64_t modifier)
4847 {
4848 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4849 		return 0;
4850 
4851 	return AMD_FMT_MOD_GET(TILE, modifier);
4852 }
4853 
/*
 * drm_mode_config_funcs.get_format_info hook: resolve the format
 * description for a framebuffer creation request, passing the first
 * modifier along so modifier-dependent plane layouts can be reported.
 */
static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}
4859 
4860 static void
4861 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4862 				    union dc_tiling_info *tiling_info,
4863 				    uint64_t modifier)
4864 {
4865 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4866 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4867 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4868 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4869 
4870 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4871 
4872 	if (!IS_AMD_FMT_MOD(modifier))
4873 		return;
4874 
4875 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4876 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4877 
4878 	if (adev->family >= AMDGPU_FAMILY_NV) {
4879 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4880 	} else {
4881 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4882 
4883 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4884 	}
4885 }
4886 
/*
 * Micro-tile ordering, carried in the low two bits of a GFX9+ swizzle
 * mode; callers extract it with "& 3" (see dm_plane_format_mod_supported()).
 */
enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};
4893 
/*
 * drm_plane_funcs.format_mod_supported hook: decide whether @format may be
 * used with @modifier on this plane. Beyond membership in the plane's
 * advertised modifier list, this rejects combinations the display hardware
 * cannot scan out (bpp-dependent micro-swizzle rules, DCC restrictions).
 */
static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);
	int i;

	/* Low two bits of the swizzle mode select the micro-tile ordering. */
	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow these modifiers:
	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR ||
	    modifier == DRM_FORMAT_MOD_INVALID) {
		return true;
	}

	/* Check that the modifier is on the list of the plane's supported modifiers. */
	for (i = 0; i < plane->modifier_count; i++) {
		if (modifier == plane->modifiers[i])
			break;
	}
	if (i == plane->modifier_count)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	/* On DCN, _D micro-tiling is only usable with 64bpp formats. */
	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
		/* We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes. */
		if (info->num_planes > 1)
			return false;
	}

	return true;
}
4951 
4952 static void
4953 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4954 {
4955 	if (!*mods)
4956 		return;
4957 
4958 	if (*cap - *size < 1) {
4959 		uint64_t new_cap = *cap * 2;
4960 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4961 
4962 		if (!new_mods) {
4963 			kfree(*mods);
4964 			*mods = NULL;
4965 			return;
4966 		}
4967 
4968 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4969 		kfree(*mods);
4970 		*mods = new_mods;
4971 		*cap = new_cap;
4972 	}
4973 
4974 	(*mods)[*size] = mod;
4975 	*size += 1;
4976 }
4977 
/*
 * Advertise the format modifiers supported by GFX9 (Vega/Raven) display
 * hardware, most-preferred first. DCC variants are only emitted for the
 * Raven (DCN) family; plain S/D 64K swizzles close the list.
 */
static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	/* log2 quantities that parameterize the _X (xor) swizzle modes. */
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);


	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		/* DCC with independent 64B blocks, constant-encode first. */
		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		/* DCC_RETILE variants: displayable plus a retiled metadata plane. */
		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |

				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				    AMD_FMT_MOD_SET(RB, rb) |
				    AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			    AMD_FMT_MOD_SET(RB, rb) |
			    AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}
5084 
/*
 * Advertise the format modifiers supported by GFX10.1 (Navi1x) display
 * hardware, most-preferred first: DCC R_X variants, then plain R_X/S_X,
 * then the GFX9-compatible 64K D/S swizzles.
 */
static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	/* Same, plus a retiled displayable DCC metadata plane. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));


	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
5130 
/*
 * Advertise the format modifiers supported by GFX10.3 (Sienna Cichlid /
 * RB+) display hardware, most-preferred first. Compared to GFX10.1 these
 * additionally encode PACKERS and the independent-128B DCC variants.
 */
static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	/* DCC_RETILE variants: add a retiled displayable metadata plane. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	/* Non-DCC xor swizzles. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
5203 
5204 static int
5205 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5206 {
5207 	uint64_t size = 0, capacity = 128;
5208 	*mods = NULL;
5209 
5210 	/* We have not hooked up any pre-GFX9 modifiers. */
5211 	if (adev->family < AMDGPU_FAMILY_AI)
5212 		return 0;
5213 
5214 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5215 
5216 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5217 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5218 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5219 		return *mods ? 0 : -ENOMEM;
5220 	}
5221 
5222 	switch (adev->family) {
5223 	case AMDGPU_FAMILY_AI:
5224 	case AMDGPU_FAMILY_RV:
5225 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5226 		break;
5227 	case AMDGPU_FAMILY_NV:
5228 	case AMDGPU_FAMILY_VGH:
5229 	case AMDGPU_FAMILY_YC:
5230 	case AMDGPU_FAMILY_GC_10_3_6:
5231 	case AMDGPU_FAMILY_GC_10_3_7:
5232 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5233 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5234 		else
5235 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5236 		break;
5237 	}
5238 
5239 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5240 
5241 	/* INVALID marks the end of the list. */
5242 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5243 
5244 	if (!*mods)
5245 		return -ENOMEM;
5246 
5247 	return 0;
5248 }
5249 
/*
 * Derive GFX9+ tiling and DCC plane attributes from the framebuffer's
 * format modifier, then validate the resulting DCC configuration.
 * Returns 0 on success or the (negative) validate_dcc() error.
 */
static int
fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
					  const struct amdgpu_framebuffer *afb,
					  const enum surface_pixel_format format,
					  const enum dc_rotation_angle rotation,
					  const struct plane_size *plane_size,
					  union dc_tiling_info *tiling_info,
					  struct dc_plane_dcc_param *dcc,
					  struct dc_plane_address *address,
					  const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret = 0;

	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
		/* DCC metadata lives in the framebuffer's second plane. */
		uint64_t dcc_address = afb->address + afb->base.offsets[1];
		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = independent_64b_blks;
		/*
		 * Map the modifier's independent-block flags onto the HUBP
		 * independent-block setting; only GFX10.3+ (RB+) modifiers
		 * can encode 128B blocks.
		 */
		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
			if (independent_64b_blks && independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
			else if (independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_128b;
			else if (independent_64b_blks && !independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		} else {
			if (independent_64b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		}

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);

	return ret;
}
5301 
/*
 * Fill DC's buffer-related plane attributes (tiling, sizes, DCC, scanout
 * addresses) from an amdgpu framebuffer. All output structs are zeroed
 * first. Returns 0 on success or a negative error from the GFX9+ path.
 */
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		/* Single-plane graphics surface. */
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		/* DC pitches are in pixels, DRM pitches in bytes. */
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		/* Two-plane (luma + chroma) video surface. */
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	/* GFX9+ derives tiling/DCC from the modifier; GFX8- from tiling flags. */
	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}
5383 
5384 static void
5385 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5386 			       bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5387 			       bool *global_alpha, int *global_alpha_value)
5388 {
5389 	*per_pixel_alpha = false;
5390 	*pre_multiplied_alpha = true;
5391 	*global_alpha = false;
5392 	*global_alpha_value = 0xff;
5393 
5394 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5395 		return;
5396 
5397 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5398 		plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5399 		static const uint32_t alpha_formats[] = {
5400 			DRM_FORMAT_ARGB8888,
5401 			DRM_FORMAT_RGBA8888,
5402 			DRM_FORMAT_ABGR8888,
5403 		};
5404 		uint32_t format = plane_state->fb->format->format;
5405 		unsigned int i;
5406 
5407 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5408 			if (format == alpha_formats[i]) {
5409 				*per_pixel_alpha = true;
5410 				break;
5411 			}
5412 		}
5413 
5414 		if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5415 			*pre_multiplied_alpha = false;
5416 	}
5417 
5418 	if (plane_state->alpha < 0xffff) {
5419 		*global_alpha = true;
5420 		*global_alpha_value = plane_state->alpha >> 8;
5421 	}
5422 }
5423 
5424 static int
5425 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5426 			    const enum surface_pixel_format format,
5427 			    enum dc_color_space *color_space)
5428 {
5429 	bool full_range;
5430 
5431 	*color_space = COLOR_SPACE_SRGB;
5432 
5433 	/* DRM color properties only affect non-RGB formats. */
5434 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5435 		return 0;
5436 
5437 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5438 
5439 	switch (plane_state->color_encoding) {
5440 	case DRM_COLOR_YCBCR_BT601:
5441 		if (full_range)
5442 			*color_space = COLOR_SPACE_YCBCR601;
5443 		else
5444 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5445 		break;
5446 
5447 	case DRM_COLOR_YCBCR_BT709:
5448 		if (full_range)
5449 			*color_space = COLOR_SPACE_YCBCR709;
5450 		else
5451 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5452 		break;
5453 
5454 	case DRM_COLOR_YCBCR_BT2020:
5455 		if (full_range)
5456 			*color_space = COLOR_SPACE_2020_YCBCR;
5457 		else
5458 			return -EINVAL;
5459 		break;
5460 
5461 	default:
5462 		return -EINVAL;
5463 	}
5464 
5465 	return 0;
5466 }
5467 
/*
 * Translate a DRM plane state into DC's plane_info plus scanout address:
 * pixel format, rotation, tiling/DCC/size (via fill_plane_buffer_attributes)
 * and blending settings. Returns 0 on success, -EINVAL for unsupported
 * formats, or an error from the helpers.
 */
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	/* Map the DRM fourcc to DC's surface pixel format. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	case DRM_FORMAT_XRGB16161616:
	case DRM_FORMAT_ARGB16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
		break;
	case DRM_FORMAT_XBGR16161616:
	case DRM_FORMAT_ABGR16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %p4cc\n",
			&fb->format->format);
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}
5583 
5584 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5585 				    struct dc_plane_state *dc_plane_state,
5586 				    struct drm_plane_state *plane_state,
5587 				    struct drm_crtc_state *crtc_state)
5588 {
5589 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5590 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5591 	struct dc_scaling_info scaling_info;
5592 	struct dc_plane_info plane_info;
5593 	int ret;
5594 	bool force_disable_dcc = false;
5595 
5596 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5597 	if (ret)
5598 		return ret;
5599 
5600 	dc_plane_state->src_rect = scaling_info.src_rect;
5601 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5602 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5603 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5604 
5605 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5606 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5607 					  afb->tiling_flags,
5608 					  &plane_info,
5609 					  &dc_plane_state->address,
5610 					  afb->tmz_surface,
5611 					  force_disable_dcc);
5612 	if (ret)
5613 		return ret;
5614 
5615 	dc_plane_state->format = plane_info.format;
5616 	dc_plane_state->color_space = plane_info.color_space;
5617 	dc_plane_state->format = plane_info.format;
5618 	dc_plane_state->plane_size = plane_info.plane_size;
5619 	dc_plane_state->rotation = plane_info.rotation;
5620 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5621 	dc_plane_state->stereo_format = plane_info.stereo_format;
5622 	dc_plane_state->tiling_info = plane_info.tiling_info;
5623 	dc_plane_state->visible = plane_info.visible;
5624 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5625 	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5626 	dc_plane_state->global_alpha = plane_info.global_alpha;
5627 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5628 	dc_plane_state->dcc = plane_info.dcc;
5629 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5630 	dc_plane_state->flip_int_enabled = true;
5631 
5632 	/*
5633 	 * Always set input transfer function, since plane state is refreshed
5634 	 * every time.
5635 	 */
5636 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5637 	if (ret)
5638 		return ret;
5639 
5640 	return 0;
5641 }
5642 
5643 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5644 					   const struct dm_connector_state *dm_state,
5645 					   struct dc_stream_state *stream)
5646 {
5647 	enum amdgpu_rmx_type rmx_type;
5648 
5649 	struct rect src = { 0 }; /* viewport in composition space*/
5650 	struct rect dst = { 0 }; /* stream addressable area */
5651 
5652 	/* no mode. nothing to be done */
5653 	if (!mode)
5654 		return;
5655 
5656 	/* Full screen scaling by default */
5657 	src.width = mode->hdisplay;
5658 	src.height = mode->vdisplay;
5659 	dst.width = stream->timing.h_addressable;
5660 	dst.height = stream->timing.v_addressable;
5661 
5662 	if (dm_state) {
5663 		rmx_type = dm_state->scaling;
5664 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5665 			if (src.width * dst.height <
5666 					src.height * dst.width) {
5667 				/* height needs less upscaling/more downscaling */
5668 				dst.width = src.width *
5669 						dst.height / src.height;
5670 			} else {
5671 				/* width needs less upscaling/more downscaling */
5672 				dst.height = src.height *
5673 						dst.width / src.width;
5674 			}
5675 		} else if (rmx_type == RMX_CENTER) {
5676 			dst = src;
5677 		}
5678 
5679 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5680 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5681 
5682 		if (dm_state->underscan_enable) {
5683 			dst.x += dm_state->underscan_hborder / 2;
5684 			dst.y += dm_state->underscan_vborder / 2;
5685 			dst.width -= dm_state->underscan_hborder;
5686 			dst.height -= dm_state->underscan_vborder;
5687 		}
5688 	}
5689 
5690 	stream->src = src;
5691 	stream->dst = dst;
5692 
5693 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5694 		      dst.x, dst.y, dst.width, dst.height);
5695 
5696 }
5697 
5698 static enum dc_color_depth
5699 convert_color_depth_from_display_info(const struct drm_connector *connector,
5700 				      bool is_y420, int requested_bpc)
5701 {
5702 	uint8_t bpc;
5703 
5704 	if (is_y420) {
5705 		bpc = 8;
5706 
5707 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5708 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5709 			bpc = 16;
5710 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5711 			bpc = 12;
5712 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5713 			bpc = 10;
5714 	} else {
5715 		bpc = (uint8_t)connector->display_info.bpc;
5716 		/* Assume 8 bpc by default if no bpc is specified. */
5717 		bpc = bpc ? bpc : 8;
5718 	}
5719 
5720 	if (requested_bpc > 0) {
5721 		/*
5722 		 * Cap display bpc based on the user requested value.
5723 		 *
5724 		 * The value for state->max_bpc may not correctly updated
5725 		 * depending on when the connector gets added to the state
5726 		 * or if this was called outside of atomic check, so it
5727 		 * can't be used directly.
5728 		 */
5729 		bpc = min_t(u8, bpc, requested_bpc);
5730 
5731 		/* Round down to the nearest even number. */
5732 		bpc = bpc - (bpc & 1);
5733 	}
5734 
5735 	switch (bpc) {
5736 	case 0:
5737 		/*
5738 		 * Temporary Work around, DRM doesn't parse color depth for
5739 		 * EDID revision before 1.4
5740 		 * TODO: Fix edid parsing
5741 		 */
5742 		return COLOR_DEPTH_888;
5743 	case 6:
5744 		return COLOR_DEPTH_666;
5745 	case 8:
5746 		return COLOR_DEPTH_888;
5747 	case 10:
5748 		return COLOR_DEPTH_101010;
5749 	case 12:
5750 		return COLOR_DEPTH_121212;
5751 	case 14:
5752 		return COLOR_DEPTH_141414;
5753 	case 16:
5754 		return COLOR_DEPTH_161616;
5755 	default:
5756 		return COLOR_DEPTH_UNDEFINED;
5757 	}
5758 }
5759 
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/*
	 * 1-1 mapping, since both enums follow the HDMI spec numbering,
	 * so a direct cast converts DRM's aspect ratio to DC's.
	 */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}
5766 
5767 static enum dc_color_space
5768 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5769 {
5770 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5771 
5772 	switch (dc_crtc_timing->pixel_encoding)	{
5773 	case PIXEL_ENCODING_YCBCR422:
5774 	case PIXEL_ENCODING_YCBCR444:
5775 	case PIXEL_ENCODING_YCBCR420:
5776 	{
5777 		/*
5778 		 * 27030khz is the separation point between HDTV and SDTV
5779 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5780 		 * respectively
5781 		 */
5782 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5783 			if (dc_crtc_timing->flags.Y_ONLY)
5784 				color_space =
5785 					COLOR_SPACE_YCBCR709_LIMITED;
5786 			else
5787 				color_space = COLOR_SPACE_YCBCR709;
5788 		} else {
5789 			if (dc_crtc_timing->flags.Y_ONLY)
5790 				color_space =
5791 					COLOR_SPACE_YCBCR601_LIMITED;
5792 			else
5793 				color_space = COLOR_SPACE_YCBCR601;
5794 		}
5795 
5796 	}
5797 	break;
5798 	case PIXEL_ENCODING_RGB:
5799 		color_space = COLOR_SPACE_SRGB;
5800 		break;
5801 
5802 	default:
5803 		WARN_ON(1);
5804 		break;
5805 	}
5806 
5807 	return color_space;
5808 }
5809 
/*
 * Walk down from the current colour depth until the resulting TMDS clock
 * fits within the sink's max_tmds_clock.  On success, writes the chosen
 * depth back to timing_out->display_color_depth and returns true; returns
 * false if no HDMI-valid depth fits (or the depth is not HDMI-valid).
 */
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;
	do {
		/* pix_clk_100hz / 10 gives the clock in kHz, the unit
		 * drm_display_info.max_tmds_clock is specified in. */
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
		/* Try the next lower depth (enum values are ordered). */
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
5845 
/*
 * Translate a DRM display mode plus connector capabilities into the DC
 * stream timing (pixel encoding, colour depth, VIC, sync polarities and
 * the h/v timing fields).  When @old_stream is given, its VIC and sync
 * polarities are carried over instead of being rederived from @mode_in.
 */
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	/* Pick the pixel encoding: YCbCr 4:2:0 when the mode requires it (or
	 * the connector forces it), else 4:4:4 when the sink supports it on
	 * HDMI, else RGB. */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	/* Reuse the previous stream's VIC and polarities when supplied, so a
	 * refresh-only change does not alter them. */
	if(old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	/* For HDMI, take the VICs from the AVI/vendor infoframes; this
	 * overrides the CEA match (or old-stream value) set above. */
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	if (is_freesync_video_mode(mode_in, aconnector)) {
		/* FreeSync video mode: use the raw mode timing, not crtc_*. */
		timing_out->h_addressable = mode_in->hdisplay;
		timing_out->h_total = mode_in->htotal;
		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
		timing_out->v_total = mode_in->vtotal;
		timing_out->v_addressable = mode_in->vdisplay;
		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
		/* mode clock is in kHz; pix_clk_100hz is in 100 Hz units. */
		timing_out->pix_clk_100hz = mode_in->clock * 10;
	} else {
		/* Use the hardware (crtc_*) timing fields otherwise. */
		timing_out->h_addressable = mode_in->crtc_hdisplay;
		timing_out->h_total = mode_in->crtc_htotal;
		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
		timing_out->v_total = mode_in->crtc_vtotal;
		timing_out->v_addressable = mode_in->crtc_vdisplay;
		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	}

	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		/* If the chosen depth exceeds the sink's TMDS clock limit,
		 * retry with YCbCr 4:2:0 (which halves the TMDS clock) when
		 * the mode supports it. */
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
5944 
5945 static void fill_audio_info(struct audio_info *audio_info,
5946 			    const struct drm_connector *drm_connector,
5947 			    const struct dc_sink *dc_sink)
5948 {
5949 	int i = 0;
5950 	int cea_revision = 0;
5951 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5952 
5953 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5954 	audio_info->product_id = edid_caps->product_id;
5955 
5956 	cea_revision = drm_connector->display_info.cea_rev;
5957 
5958 	strscpy(audio_info->display_name,
5959 		edid_caps->display_name,
5960 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5961 
5962 	if (cea_revision >= 3) {
5963 		audio_info->mode_count = edid_caps->audio_mode_count;
5964 
5965 		for (i = 0; i < audio_info->mode_count; ++i) {
5966 			audio_info->modes[i].format_code =
5967 					(enum audio_format_code)
5968 					(edid_caps->audio_modes[i].format_code);
5969 			audio_info->modes[i].channel_count =
5970 					edid_caps->audio_modes[i].channel_count;
5971 			audio_info->modes[i].sample_rates.all =
5972 					edid_caps->audio_modes[i].sample_rate;
5973 			audio_info->modes[i].sample_size =
5974 					edid_caps->audio_modes[i].sample_size;
5975 		}
5976 	}
5977 
5978 	audio_info->flags.all = edid_caps->speaker_flags;
5979 
5980 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5981 	if (drm_connector->latency_present[0]) {
5982 		audio_info->video_latency = drm_connector->video_latency[0];
5983 		audio_info->audio_latency = drm_connector->audio_latency[0];
5984 	}
5985 
5986 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5987 
5988 }
5989 
5990 static void
5991 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5992 				      struct drm_display_mode *dst_mode)
5993 {
5994 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5995 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5996 	dst_mode->crtc_clock = src_mode->crtc_clock;
5997 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5998 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5999 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6000 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6001 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6002 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6003 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6004 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6005 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6006 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6007 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6008 }
6009 
6010 static void
6011 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6012 					const struct drm_display_mode *native_mode,
6013 					bool scale_enabled)
6014 {
6015 	if (scale_enabled) {
6016 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6017 	} else if (native_mode->clock == drm_mode->clock &&
6018 			native_mode->htotal == drm_mode->htotal &&
6019 			native_mode->vtotal == drm_mode->vtotal) {
6020 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6021 	} else {
6022 		/* no scaling nor amdgpu inserted, no need to patch */
6023 	}
6024 }
6025 
6026 static struct dc_sink *
6027 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6028 {
6029 	struct dc_sink_init_data sink_init_data = { 0 };
6030 	struct dc_sink *sink = NULL;
6031 	sink_init_data.link = aconnector->dc_link;
6032 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6033 
6034 	sink = dc_sink_create(&sink_init_data);
6035 	if (!sink) {
6036 		DRM_ERROR("Failed to create sink!\n");
6037 		return NULL;
6038 	}
6039 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6040 
6041 	return sink;
6042 }
6043 
6044 static void set_multisync_trigger_params(
6045 		struct dc_stream_state *stream)
6046 {
6047 	struct dc_stream_state *master = NULL;
6048 
6049 	if (stream->triggered_crtc_reset.enabled) {
6050 		master = stream->triggered_crtc_reset.event_source;
6051 		stream->triggered_crtc_reset.event =
6052 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6053 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6054 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6055 	}
6056 }
6057 
6058 static void set_master_stream(struct dc_stream_state *stream_set[],
6059 			      int stream_count)
6060 {
6061 	int j, highest_rfr = 0, master_stream = 0;
6062 
6063 	for (j = 0;  j < stream_count; j++) {
6064 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6065 			int refresh_rate = 0;
6066 
6067 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6068 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6069 			if (refresh_rate > highest_rfr) {
6070 				highest_rfr = refresh_rate;
6071 				master_stream = j;
6072 			}
6073 		}
6074 	}
6075 	for (j = 0;  j < stream_count; j++) {
6076 		if (stream_set[j])
6077 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6078 	}
6079 }
6080 
6081 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6082 {
6083 	int i = 0;
6084 	struct dc_stream_state *stream;
6085 
6086 	if (context->stream_count < 2)
6087 		return;
6088 	for (i = 0; i < context->stream_count ; i++) {
6089 		if (!context->streams[i])
6090 			continue;
6091 		/*
6092 		 * TODO: add a function to read AMD VSDB bits and set
6093 		 * crtc_sync_master.multi_sync_enabled flag
6094 		 * For now it's set to false
6095 		 */
6096 	}
6097 
6098 	set_master_stream(context->streams, context->stream_count);
6099 
6100 	for (i = 0; i < context->stream_count ; i++) {
6101 		stream = context->streams[i];
6102 
6103 		if (!stream)
6104 			continue;
6105 
6106 		set_multisync_trigger_params(stream);
6107 	}
6108 }
6109 
6110 #if defined(CONFIG_DRM_AMD_DC_DCN)
6111 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6112 							struct dc_sink *sink, struct dc_stream_state *stream,
6113 							struct dsc_dec_dpcd_caps *dsc_caps)
6114 {
6115 	stream->timing.flags.DSC = 0;
6116 	dsc_caps->is_dsc_supported = false;
6117 
6118 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6119 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6120 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6121 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6122 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6123 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6124 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6125 				dsc_caps);
6126 	}
6127 }
6128 
/*
 * Decide DSC configuration for an eDP panel.  Computes the achievable
 * bandwidth range for an 8 bpp (capped by the panel's max) target and
 * enables DSC on the stream timing when a valid config is found.
 */
static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
				    struct dc_sink *sink, struct dc_stream_state *stream,
				    struct dsc_dec_dpcd_caps *dsc_caps,
				    uint32_t max_dsc_target_bpp_limit_override)
{
	const struct dc_link_settings *verified_link_cap = NULL;
	uint32_t link_bw_in_kbps;
	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
	struct dc *dc = sink->ctx->dc;
	struct dc_dsc_bw_range bw_range = {0};
	struct dc_dsc_config dsc_cfg = {0};

	verified_link_cap = dc_link_get_link_cap(stream->link);
	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
	/* bpp values are in 1/16 bpp units; start from an 8 bpp target. */
	edp_min_bpp_x16 = 8 * 16;
	edp_max_bpp_x16 = 8 * 16;

	/* Cap the target at the panel-reported maximum.
	 * NOTE(review): if edp_max_bits_per_pixel is 0 the window collapses
	 * to [0, 0]; presumably dc_dsc_compute_bandwidth_range() then fails
	 * and we fall through to the link-bandwidth path — confirm. */
	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;

	if (edp_max_bpp_x16 < edp_min_bpp_x16)
		edp_min_bpp_x16 = edp_max_bpp_x16;

	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
				dc->debug.dsc_min_slice_height_override,
				edp_min_bpp_x16, edp_max_bpp_x16,
				dsc_caps,
				&stream->timing,
				&bw_range)) {

		/* The whole bpp range fits within the link bandwidth: pin
		 * the config to the max target bpp. */
		if (bw_range.max_kbps < link_bw_in_kbps) {
			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
					dsc_caps,
					dc->debug.dsc_min_slice_height_override,
					max_dsc_target_bpp_limit_override,
					0,
					&stream->timing,
					&dsc_cfg)) {
				stream->timing.dsc_cfg = dsc_cfg;
				stream->timing.flags.DSC = 1;
				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
			}
			return;
		}
	}

	/* Otherwise let DC pick a config constrained by link bandwidth. */
	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
				dsc_caps,
				dc->debug.dsc_min_slice_height_override,
				max_dsc_target_bpp_limit_override,
				link_bw_in_kbps,
				&stream->timing,
				&dsc_cfg)) {
		stream->timing.dsc_cfg = dsc_cfg;
		stream->timing.flags.DSC = 1;
	}
}
6186 
/*
 * Decide whether to enable DSC for a (SST) stream and fill in its DSC
 * config: eDP gets the dedicated eDP policy, direct DP sinks and
 * DP-to-HDMI PCONs get a link-bandwidth-constrained config.  debugfs
 * overrides (force enable, slice counts, bpp) are applied last.
 */
static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
										struct dc_sink *sink, struct dc_stream_state *stream,
										struct dsc_dec_dpcd_caps *dsc_caps)
{
	struct drm_connector *drm_connector = &aconnector->base;
	uint32_t link_bandwidth_kbps;
	uint32_t max_dsc_target_bpp_limit_override = 0;
	struct dc *dc = sink->ctx->dc;
	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
	uint32_t dsc_max_supported_bw_in_kbps;

	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							dc_link_get_link_cap(aconnector->dc_link));

	/* Honor a panel-specific target bpp cap from the EDID patch table. */
	if (stream->link && stream->link->local_sink)
		max_dsc_target_bpp_limit_override =
			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;

	/* Set DSC policy according to dsc_clock_en */
	dc_dsc_policy_set_enable_dsc_when_not_needed(
		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {

		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);

	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
			/* Direct DP sink: config constrained by link bw. */
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						dsc_caps,
						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						max_dsc_target_bpp_limit_override,
						link_bandwidth_kbps,
						&stream->timing,
						&stream->timing.dsc_cfg)) {
				stream->timing.flags.DSC = 1;
				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
								 __func__, drm_connector->name);
			}
		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			/* DP-to-HDMI PCON: only compress when the timing
			 * exceeds what the link can carry uncompressed. */
			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
			max_supported_bw_in_kbps = link_bandwidth_kbps;
			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;

			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
					max_supported_bw_in_kbps > 0 &&
					dsc_max_supported_bw_in_kbps > 0)
				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						dsc_caps,
						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						max_dsc_target_bpp_limit_override,
						dsc_max_supported_bw_in_kbps,
						&stream->timing,
						&stream->timing.dsc_cfg)) {
					stream->timing.flags.DSC = 1;
					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
									 __func__, drm_connector->name);
				}
		}
	}

	/* Overwrite the stream flag if DSC is enabled through debugfs */
	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
		stream->timing.flags.DSC = 1;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
6262 #endif /* CONFIG_DRM_AMD_DC_DCN */
6263 
6264 /**
6265  * DOC: FreeSync Video
6266  *
6267  * When a userspace application wants to play a video, the content follows a
6268  * standard format definition that usually specifies the FPS for that format.
6269  * The below list illustrates some video format and the expected FPS,
6270  * respectively:
6271  *
6272  * - TV/NTSC (23.976 FPS)
6273  * - Cinema (24 FPS)
6274  * - TV/PAL (25 FPS)
6275  * - TV/NTSC (29.97 FPS)
6276  * - TV/NTSC (30 FPS)
6277  * - Cinema HFR (48 FPS)
6278  * - TV/PAL (50 FPS)
6279  * - Commonly used (60 FPS)
6280  * - Multiples of 24 (48,72,96,120 FPS)
6281  *
 * The list of standard video formats is not huge and can be added to the
 * connector modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
6285  * rate. Such a switch will happen seamlessly, without screen blanking or
6286  * reprogramming of the output in any other way. If the userspace requests a
6287  * modesetting change compatible with FreeSync modes that only differ in the
6288  * refresh rate, DC will skip the full update and avoid blink during the
6289  * transition. For example, the video player can change the modesetting from
6290  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6291  * causing any display blink. This same concept can be applied to a mode
6292  * setting change.
6293  */
/*
 * Return the mode with the highest refresh rate among the modes that share
 * the preferred mode's resolution.  The result is cached in
 * aconnector->freesync_vid_base and reused on subsequent calls.
 * @use_probed_modes selects probed_modes over the regular mode list.
 * Returns NULL if the connector has no modes at all.
 */
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			  bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
						    &aconnector->base.probed_modes :
						    &aconnector->base.modes;

	/* A non-zero clock means the cached base mode is valid. */
	if (aconnector->freesync_vid_base.clock != 0)
		return &aconnector->freesync_vid_base;

	/* Find the preferred mode */
	list_for_each_entry (m, list_head, head) {
		if (m->type & DRM_MODE_TYPE_PREFERRED) {
			m_pref = m;
			break;
		}
	}

	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fallback to first entry */
		m_pref = list_first_entry_or_null(
			&aconnector->base.modes, struct drm_display_mode, head);
		if (!m_pref) {
			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
			return NULL;
		}
	}

	highest_refresh = drm_mode_vrefresh(m_pref);

	/*
	 * Find the mode with highest refresh rate with same resolution.
	 * For some monitors, preferred mode is not the mode with highest
	 * supported refresh rate.
	 */
	list_for_each_entry (m, list_head, head) {
		current_refresh  = drm_mode_vrefresh(m);

		if (m->hdisplay == m_pref->hdisplay &&
		    m->vdisplay == m_pref->vdisplay &&
		    highest_refresh < current_refresh) {
			highest_refresh = current_refresh;
			m_pref = m;
		}
	}

	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
	return m_pref;
}
6346 
6347 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6348 				   struct amdgpu_dm_connector *aconnector)
6349 {
6350 	struct drm_display_mode *high_mode;
6351 	int timing_diff;
6352 
6353 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6354 	if (!high_mode || !mode)
6355 		return false;
6356 
6357 	timing_diff = high_mode->vtotal - mode->vtotal;
6358 
6359 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6360 	    high_mode->hdisplay != mode->hdisplay ||
6361 	    high_mode->vdisplay != mode->vdisplay ||
6362 	    high_mode->hsync_start != mode->hsync_start ||
6363 	    high_mode->hsync_end != mode->hsync_end ||
6364 	    high_mode->htotal != mode->htotal ||
6365 	    high_mode->hskew != mode->hskew ||
6366 	    high_mode->vscan != mode->vscan ||
6367 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6368 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6369 		return false;
6370 	else
6371 		return true;
6372 }
6373 
/*
 * Build a dc_stream_state for the given connector and mode.  Uses a fake
 * (virtual) sink when the connector has none, fills the stream timing
 * (honoring FreeSync video modes and, when given, @old_stream's VIC and
 * polarities), applies the DSC policy, scaling, audio and PSR/VSC setup.
 * Returns NULL on failure; on success the caller owns the stream reference.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	struct drm_display_mode saved_mode;
	struct drm_display_mode *freesync_mode = NULL;
	bool native_mode_found = false;
	bool recalculate_timing = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	struct dc_sink *sink = NULL;

	memset(&saved_mode, 0, sizeof(saved_mode));

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	/* Without a real sink (e.g. nothing connected), use a virtual one. */
	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		/* For a FreeSync video mode, program the timing of the base
		 * (highest refresh) mode and only vary the front porch. */
		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
		if (recalculate_timing) {
			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
			drm_mode_copy(&saved_mode, &mode);
			drm_mode_copy(&mode, freesync_mode);
		} else {
			decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode, scale);

			preferred_refresh = drm_mode_vrefresh(preferred_mode);
		}
	}

	if (recalculate_timing)
		drm_mode_set_crtcinfo(&saved_mode, 0);
	else if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and refresh rate didn't change
	 * we copy the vic and polarities of the old timings
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, NULL,
			requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, old_stream,
			requested_bpc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* SST DSC determination policy */
	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
#endif

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		//
		// should decide stream support vsc sdp colorimetry capability
		// before building vsc info packet
		//
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;

	}
finish:
	/* Drop the local reference taken above (retain or create). */
	dc_sink_release(sink);

	return stream;
}
6525 
/* drm_crtc_funcs.destroy hook: tear down DRM core CRTC state and free the
 * containing amdgpu_crtc allocation (crtc is the first member, so freeing
 * the drm_crtc pointer frees the whole object).
 */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
6531 
6532 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6533 				  struct drm_crtc_state *state)
6534 {
6535 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6536 
6537 	/* TODO Destroy dc_stream objects are stream object is flattened */
6538 	if (cur->stream)
6539 		dc_stream_release(cur->stream);
6540 
6541 
6542 	__drm_atomic_helper_crtc_destroy_state(state);
6543 
6544 
6545 	kfree(state);
6546 }
6547 
6548 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6549 {
6550 	struct dm_crtc_state *state;
6551 
6552 	if (crtc->state)
6553 		dm_crtc_destroy_state(crtc, crtc->state);
6554 
6555 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6556 	if (WARN_ON(!state))
6557 		return;
6558 
6559 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6560 }
6561 
6562 static struct drm_crtc_state *
6563 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6564 {
6565 	struct dm_crtc_state *state, *cur;
6566 
6567 	cur = to_dm_crtc_state(crtc->state);
6568 
6569 	if (WARN_ON(!crtc->state))
6570 		return NULL;
6571 
6572 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6573 	if (!state)
6574 		return NULL;
6575 
6576 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6577 
6578 	if (cur->stream) {
6579 		state->stream = cur->stream;
6580 		dc_stream_retain(state->stream);
6581 	}
6582 
6583 	state->active_planes = cur->active_planes;
6584 	state->vrr_infopacket = cur->vrr_infopacket;
6585 	state->abm_level = cur->abm_level;
6586 	state->vrr_supported = cur->vrr_supported;
6587 	state->freesync_config = cur->freesync_config;
6588 	state->cm_has_degamma = cur->cm_has_degamma;
6589 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6590 	state->force_dpms_off = cur->force_dpms_off;
6591 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
6592 
6593 	return &state->base;
6594 }
6595 
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
/* drm_crtc_funcs.late_register hook: create the per-CRTC debugfs entries
 * used by the secure-display feature. Only built when secure display is
 * enabled in the kernel config.
 */
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
	crtc_debugfs_init(crtc);

	return 0;
}
#endif
6604 
6605 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6606 {
6607 	enum dc_irq_source irq_source;
6608 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6609 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6610 	int rc;
6611 
6612 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6613 
6614 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6615 
6616 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6617 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6618 	return rc;
6619 }
6620 
/* Enable or disable vblank interrupts for a CRTC.
 *
 * Also manages the coupled VUPDATE interrupt (needed only while VRR is
 * active) and, outside of GPU reset, queues deferred work on the
 * vblank_control_workqueue (which, per the worker, handles things that
 * cannot be done in interrupt context, e.g. PSR interactions).
 *
 * Returns 0 on success, -EBUSY if DC refused the interrupt change, or
 * -ENOMEM if the control work item could not be allocated.
 */
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct vblank_control_work *work;
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;

	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
		return -EBUSY;

	/* During reset, skip the deferred control work entirely. */
	if (amdgpu_in_reset(adev))
		return 0;

	if (dm->vblank_control_workqueue) {
		/* GFP_ATOMIC: this path can be reached with IRQs disabled. */
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, vblank_control_worker);
		work->dm = dm;
		work->acrtc = acrtc;
		work->enable = enable;

		/* The worker releases this stream reference when done. */
		if (acrtc_state->stream) {
			dc_stream_retain(acrtc_state->stream);
			work->stream = acrtc_state->stream;
		}

		queue_work(dm->vblank_control_workqueue, &work->work);
	}

	return 0;
}
6671 
/* drm_crtc_funcs.enable_vblank hook: thin wrapper around dm_set_vblank(). */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
6676 
/* drm_crtc_funcs.disable_vblank hook: errors from dm_set_vblank() cannot
 * be reported here because the hook returns void.
 */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
6681 
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	/* Legacy setcrtc/pageflip are routed through the atomic helpers. */
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	/* CRC capture support (used by e.g. IGT tests). */
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	.late_register = amdgpu_dm_crtc_late_register,
#endif
};
6701 
6702 static enum drm_connector_status
6703 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6704 {
6705 	bool connected;
6706 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6707 
6708 	/*
6709 	 * Notes:
6710 	 * 1. This interface is NOT called in context of HPD irq.
6711 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6712 	 * makes it a bad place for *any* MST-related activity.
6713 	 */
6714 
6715 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6716 	    !aconnector->fake_enable)
6717 		connected = (aconnector->dc_sink != NULL);
6718 	else
6719 		connected = (aconnector->base.force == DRM_FORCE_ON);
6720 
6721 	update_subconnector_property(aconnector);
6722 
6723 	return (connected ? connector_status_connected :
6724 			connector_status_disconnected);
6725 }
6726 
6727 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6728 					    struct drm_connector_state *connector_state,
6729 					    struct drm_property *property,
6730 					    uint64_t val)
6731 {
6732 	struct drm_device *dev = connector->dev;
6733 	struct amdgpu_device *adev = drm_to_adev(dev);
6734 	struct dm_connector_state *dm_old_state =
6735 		to_dm_connector_state(connector->state);
6736 	struct dm_connector_state *dm_new_state =
6737 		to_dm_connector_state(connector_state);
6738 
6739 	int ret = -EINVAL;
6740 
6741 	if (property == dev->mode_config.scaling_mode_property) {
6742 		enum amdgpu_rmx_type rmx_type;
6743 
6744 		switch (val) {
6745 		case DRM_MODE_SCALE_CENTER:
6746 			rmx_type = RMX_CENTER;
6747 			break;
6748 		case DRM_MODE_SCALE_ASPECT:
6749 			rmx_type = RMX_ASPECT;
6750 			break;
6751 		case DRM_MODE_SCALE_FULLSCREEN:
6752 			rmx_type = RMX_FULL;
6753 			break;
6754 		case DRM_MODE_SCALE_NONE:
6755 		default:
6756 			rmx_type = RMX_OFF;
6757 			break;
6758 		}
6759 
6760 		if (dm_old_state->scaling == rmx_type)
6761 			return 0;
6762 
6763 		dm_new_state->scaling = rmx_type;
6764 		ret = 0;
6765 	} else if (property == adev->mode_info.underscan_hborder_property) {
6766 		dm_new_state->underscan_hborder = val;
6767 		ret = 0;
6768 	} else if (property == adev->mode_info.underscan_vborder_property) {
6769 		dm_new_state->underscan_vborder = val;
6770 		ret = 0;
6771 	} else if (property == adev->mode_info.underscan_property) {
6772 		dm_new_state->underscan_enable = val;
6773 		ret = 0;
6774 	} else if (property == adev->mode_info.abm_level_property) {
6775 		dm_new_state->abm_level = val;
6776 		ret = 0;
6777 	}
6778 
6779 	return ret;
6780 }
6781 
6782 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6783 					    const struct drm_connector_state *state,
6784 					    struct drm_property *property,
6785 					    uint64_t *val)
6786 {
6787 	struct drm_device *dev = connector->dev;
6788 	struct amdgpu_device *adev = drm_to_adev(dev);
6789 	struct dm_connector_state *dm_state =
6790 		to_dm_connector_state(state);
6791 	int ret = -EINVAL;
6792 
6793 	if (property == dev->mode_config.scaling_mode_property) {
6794 		switch (dm_state->scaling) {
6795 		case RMX_CENTER:
6796 			*val = DRM_MODE_SCALE_CENTER;
6797 			break;
6798 		case RMX_ASPECT:
6799 			*val = DRM_MODE_SCALE_ASPECT;
6800 			break;
6801 		case RMX_FULL:
6802 			*val = DRM_MODE_SCALE_FULLSCREEN;
6803 			break;
6804 		case RMX_OFF:
6805 		default:
6806 			*val = DRM_MODE_SCALE_NONE;
6807 			break;
6808 		}
6809 		ret = 0;
6810 	} else if (property == adev->mode_info.underscan_hborder_property) {
6811 		*val = dm_state->underscan_hborder;
6812 		ret = 0;
6813 	} else if (property == adev->mode_info.underscan_vborder_property) {
6814 		*val = dm_state->underscan_vborder;
6815 		ret = 0;
6816 	} else if (property == adev->mode_info.underscan_property) {
6817 		*val = dm_state->underscan_enable;
6818 		ret = 0;
6819 	} else if (property == adev->mode_info.abm_level_property) {
6820 		*val = dm_state->abm_level;
6821 		ret = 0;
6822 	}
6823 
6824 	return ret;
6825 }
6826 
/* drm_connector_funcs.early_unregister hook: unregister the DP AUX
 * channel before the connector itself goes away.
 */
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
6833 
/* drm_connector_funcs.destroy hook: release everything attached to this
 * connector — MST topology manager, any backlight device bound to its
 * link, emulated and real dc_sinks, CEC, the i2c adapter, the AUX name
 * string, and finally the connector object itself.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	int i;

	/*
	 * Call only if mst_mgr was initialized before since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
	/* Unregister any backlight device registered against this link. */
	for (i = 0; i < dm->num_of_edps; i++) {
		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
			backlight_device_unregister(dm->backlight_dev[i]);
			dm->backlight_dev[i] = NULL;
		}
	}
#endif

	/* Drop sink references; NULL the pointers to avoid dangling use. */
	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
6877 
6878 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6879 {
6880 	struct dm_connector_state *state =
6881 		to_dm_connector_state(connector->state);
6882 
6883 	if (connector->state)
6884 		__drm_atomic_helper_connector_destroy_state(connector->state);
6885 
6886 	kfree(state);
6887 
6888 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6889 
6890 	if (state) {
6891 		state->scaling = RMX_OFF;
6892 		state->underscan_enable = false;
6893 		state->underscan_hborder = 0;
6894 		state->underscan_vborder = 0;
6895 		state->base.max_requested_bpc = 8;
6896 		state->vcpi_slots = 0;
6897 		state->pbn = 0;
6898 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6899 			state->abm_level = amdgpu_dm_abm_level;
6900 
6901 		__drm_atomic_helper_connector_reset(connector, &state->base);
6902 	}
6903 }
6904 
6905 struct drm_connector_state *
6906 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6907 {
6908 	struct dm_connector_state *state =
6909 		to_dm_connector_state(connector->state);
6910 
6911 	struct dm_connector_state *new_state =
6912 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6913 
6914 	if (!new_state)
6915 		return NULL;
6916 
6917 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6918 
6919 	new_state->freesync_capable = state->freesync_capable;
6920 	new_state->abm_level = state->abm_level;
6921 	new_state->scaling = state->scaling;
6922 	new_state->underscan_enable = state->underscan_enable;
6923 	new_state->underscan_hborder = state->underscan_hborder;
6924 	new_state->underscan_vborder = state->underscan_vborder;
6925 	new_state->vcpi_slots = state->vcpi_slots;
6926 	new_state->pbn = state->pbn;
6927 	return &new_state->base;
6928 }
6929 
6930 static int
6931 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6932 {
6933 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6934 		to_amdgpu_dm_connector(connector);
6935 	int r;
6936 
6937 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6938 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6939 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6940 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6941 		if (r)
6942 			return r;
6943 	}
6944 
6945 #if defined(CONFIG_DEBUG_FS)
6946 	connector_debugfs_init(amdgpu_dm_connector);
6947 #endif
6948 
6949 	return 0;
6950 }
6951 
/* Connector function table: DM-specific state management and property
 * handling, with probing delegated to the DRM probe helper.
 */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
6964 
/* drm_connector_helper_funcs.get_modes hook: thin wrapper so the helper
 * table can use a static function (see comment on the table below).
 */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
6969 
6970 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6971 {
6972 	struct dc_sink_init_data init_params = {
6973 			.link = aconnector->dc_link,
6974 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6975 	};
6976 	struct edid *edid;
6977 
6978 	if (!aconnector->base.edid_blob_ptr) {
6979 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6980 				aconnector->base.name);
6981 
6982 		aconnector->base.force = DRM_FORCE_OFF;
6983 		aconnector->base.override_edid = false;
6984 		return;
6985 	}
6986 
6987 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6988 
6989 	aconnector->edid = edid;
6990 
6991 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6992 		aconnector->dc_link,
6993 		(uint8_t *)edid,
6994 		(edid->extensions + 1) * EDID_LENGTH,
6995 		&init_params);
6996 
6997 	if (aconnector->base.force == DRM_FORCE_ON) {
6998 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6999 		aconnector->dc_link->local_sink :
7000 		aconnector->dc_em_sink;
7001 		dc_sink_retain(aconnector->dc_sink);
7002 	}
7003 }
7004 
/* Set up EDID override handling for a forced connector: seed non-zero DP
 * link caps (so an initial modeset can happen on headless boot) and
 * create the emulated sink from the EDID blob.
 */
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of headless boot with force on for DP managed connector
	 * Those settings have to be != 0 to get initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}


	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
7022 
/* Create a dc_stream for the given mode and validate it against DC.
 *
 * On validation failure the requested bpc is lowered in steps of 2 (down
 * to a minimum of 6) and the stream is rebuilt and re-validated. If the
 * final failure was an encoder validation error, one recursive retry is
 * made with YCbCr420 output forced.
 *
 * Returns a validated stream (caller owns the reference) or NULL.
 */
struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	/* One-shot YCbCr420 fallback; the flag is restored afterwards. */
	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");

		aconnector->force_yuv420_output = true;
		stream = create_validate_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream);
		aconnector->force_yuv420_output = false;
	}

	return stream;
}
7073 
/* drm_connector_helper_funcs.mode_valid hook: a mode is acceptable iff a
 * DC stream can be created and validated for it. Interlaced and
 * doublescan modes are rejected outright.
 */
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initilialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
		!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	/* Forced connectors may legitimately validate without a dc_sink. */
	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
				aconnector->base.force != DRM_FORCE_ON) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	/* The stream is only needed for validation; release it right away. */
	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling*/
	return result;
}
7113 
/* Convert the connector's HDR output metadata into a DC info packet.
 *
 * Packs a Dynamic Range and Mastering (DRM, type 0x87) infoframe and
 * wraps it in the header layout the sink type expects: a plain HDMI
 * infoframe header, or a DP SDP header for DP/eDP.
 *
 * Returns 0 on success (out->valid set iff metadata was present), a
 * negative errno on packing failure or unsupported connector type.
 */
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	/* No HDR metadata set: leave the packet zeroed/invalid. */
	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	/* Copy the 26-byte payload after the per-type prefix bytes. */
	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
7172 
/* drm_connector_helper_funcs.atomic_check hook: when the HDR output
 * metadata changed, rebuild the HDR infopacket (to validate it) and pull
 * the CRTC into the commit, forcing a modeset only when HDR is being
 * entered or exited (see comment below).
 */
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_connector_atomic_check(new_con_state);

	/* Connector not bound to a CRTC: nothing to check. */
	if (!crtc)
		return 0;

	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}
7219 
/* Connector helper table: probing and atomic validation callbacks. */
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
	 * modes will be filtered by drm_mode_validate_size(), and those modes
	 * are missing after user start lightdm. So we need to renew modes list.
	 * in get_modes call back, not just return the modes count
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};
7232 
/* Intentionally empty: CRTC disable is handled through the atomic commit
 * path, but the helper table requires a .disable callback.
 */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
7236 
7237 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7238 {
7239 	struct drm_atomic_state *state = new_crtc_state->state;
7240 	struct drm_plane *plane;
7241 	int num_active = 0;
7242 
7243 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7244 		struct drm_plane_state *new_plane_state;
7245 
7246 		/* Cursor planes are "fake". */
7247 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7248 			continue;
7249 
7250 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7251 
7252 		if (!new_plane_state) {
7253 			/*
7254 			 * The plane is enable on the CRTC and hasn't changed
7255 			 * state. This means that it previously passed
7256 			 * validation and is therefore enabled.
7257 			 */
7258 			num_active += 1;
7259 			continue;
7260 		}
7261 
7262 		/* We need a framebuffer to be considered enabled. */
7263 		num_active += (new_plane_state->fb != NULL);
7264 	}
7265 
7266 	return num_active;
7267 }
7268 
7269 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7270 					 struct drm_crtc_state *new_crtc_state)
7271 {
7272 	struct dm_crtc_state *dm_new_crtc_state =
7273 		to_dm_crtc_state(new_crtc_state);
7274 
7275 	dm_new_crtc_state->active_planes = 0;
7276 
7277 	if (!dm_new_crtc_state->stream)
7278 		return;
7279 
7280 	dm_new_crtc_state->active_planes =
7281 		count_crtc_active_planes(new_crtc_state);
7282 }
7283 
/* drm_crtc_helper_funcs.atomic_check hook: refresh the active-plane
 * count, enforce that an enabled CRTC has its primary plane enabled, and
 * validate the attached DC stream.
 *
 * Returns 0 on success, -EINVAL on any validation failure.
 */
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	int ret = -EINVAL;

	trace_amdgpu_dm_crtc_atomic_check(crtc_state);

	dm_update_crtc_active_planes(crtc, crtc_state);

	/* A modeset without a stream to program is an invalid state. */
	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
		return ret;
	}

	/*
	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
	 * planes are disabled, which is not supported by the hardware. And there is legacy
	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
	 */
	if (crtc_state->enable &&
	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
		return -EINVAL;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
	return ret;
}
7325 
/* drm_crtc_helper_funcs.mode_fixup hook: no fixup needed — all mode
 * adjustment is done when building the DC stream; accept every mode.
 */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
7332 
/* CRTC helper table: atomic validation plus scanout-position reporting. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
7339 
/* Intentionally empty: encoder programming is handled by DC during the
 * atomic commit; the helper table still requires a .disable callback.
 */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
7344 
7345 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
7346 {
7347 	switch (display_color_depth) {
7348 		case COLOR_DEPTH_666:
7349 			return 6;
7350 		case COLOR_DEPTH_888:
7351 			return 8;
7352 		case COLOR_DEPTH_101010:
7353 			return 10;
7354 		case COLOR_DEPTH_121212:
7355 			return 12;
7356 		case COLOR_DEPTH_141414:
7357 			return 14;
7358 		case COLOR_DEPTH_161616:
7359 			return 16;
7360 		default:
7361 			break;
7362 		}
7363 	return 0;
7364 }
7365 
/* drm_encoder_helper_funcs.atomic_check hook: for MST connectors,
 * compute the required PBN from the adjusted mode's clock and color
 * depth, then atomically claim VCPI slots on the topology manager.
 *
 * Returns 0 on success (or when not applicable), or the negative error
 * from the VCPI slot search.
 */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	/* Only MST connectors with an active sink need VCPI bookkeeping. */
	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	/* Skip PBN recomputation for duplicated (suspend/resume) states. */
	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}
7412 
/* Encoder helper table: MST VCPI accounting lives in atomic_check. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
7417 
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Propagate the PBN values computed by the MST DSC fairness algorithm
 * (compute_mst_dsc_configs_for_state) into each connector's state and
 * enable/disable DSC on the corresponding MST ports.
 *
 * Returns 0 on success, or a negative error from
 * drm_dp_mst_atomic_enable_dsc().
 */
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state,
					    struct dsc_mst_fairness_vars *vars)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j;
	int vcpi, pbn_div, pbn, slot_num = 0;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		/* Skip non-MST connectors. */
		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		/* Find the DC stream driven by this connector, if any. */
		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state*/
		for (j = 0; j < dc_state->stream_count; j++) {
			if (vars[j].aconnector == aconnector) {
				pbn = vars[j].pbn;
				break;
			}
		}

		/* No fairness entry for this connector: leave it untouched. */
		if (j == dc_state->stream_count)
			continue;

		slot_num = DIV_ROUND_UP(pbn, pbn_div);

		/* Stream not using DSC: record slots and disable DSC on port. */
		if (stream->timing.flags.DSC != 1) {
			dm_conn_state->pbn = pbn;
			dm_conn_state->vcpi_slots = slot_num;

			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif
7496 
7497 static void dm_drm_plane_reset(struct drm_plane *plane)
7498 {
7499 	struct dm_plane_state *amdgpu_state = NULL;
7500 
7501 	if (plane->state)
7502 		plane->funcs->atomic_destroy_state(plane, plane->state);
7503 
7504 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7505 	WARN_ON(amdgpu_state == NULL);
7506 
7507 	if (amdgpu_state)
7508 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7509 }
7510 
7511 static struct drm_plane_state *
7512 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7513 {
7514 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7515 
7516 	old_dm_plane_state = to_dm_plane_state(plane->state);
7517 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7518 	if (!dm_plane_state)
7519 		return NULL;
7520 
7521 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7522 
7523 	if (old_dm_plane_state->dc_state) {
7524 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7525 		dc_plane_state_retain(dm_plane_state->dc_state);
7526 	}
7527 
7528 	return &dm_plane_state->base;
7529 }
7530 
7531 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7532 				struct drm_plane_state *state)
7533 {
7534 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7535 
7536 	if (dm_plane_state->dc_state)
7537 		dc_plane_state_release(dm_plane_state->dc_state);
7538 
7539 	drm_atomic_helper_plane_destroy_state(plane, state);
7540 }
7541 
/* Plane funcs shared by all DM planes; state handling uses the
 * dm_plane_state wrappers above. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
	.format_mod_supported = dm_plane_format_mod_supported,
};
7551 
/*
 * dm_plane_helper_prepare_fb - pin the new framebuffer's BO for scanout.
 *
 * Reserves the GEM buffer backing @new_state->fb, reserves a fence slot
 * on it, pins it (VRAM for cursor planes, any supported display domain
 * otherwise), binds it into GART and records the resulting GPU address
 * in the amdgpu_framebuffer. An extra BO reference is taken here and
 * dropped again in dm_plane_helper_cleanup_fb().
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	uint32_t domain;
	int r;

	/* Disabling the plane: nothing to pin. */
	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);

	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	/* Reserve a fence slot up front so the commit can attach one. */
	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
	if (r) {
		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
		goto error_unlock;
	}

	/* Cursor BOs are pinned in VRAM; other planes may use any
	 * domain the display hardware supports. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		goto error_unlock;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", rbo);
		goto error_unpin;
	}

	amdgpu_bo_unreserve(rbo);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	/* Balanced by amdgpu_bo_unref() in dm_plane_helper_cleanup_fb(). */
	amdgpu_bo_ref(rbo);

	/**
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			afb->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			afb->tmz_surface, force_disable_dcc);
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(rbo);

error_unlock:
	amdgpu_bo_unreserve(rbo);
	return r;
}
7643 
7644 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7645 				       struct drm_plane_state *old_state)
7646 {
7647 	struct amdgpu_bo *rbo;
7648 	int r;
7649 
7650 	if (!old_state->fb)
7651 		return;
7652 
7653 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7654 	r = amdgpu_bo_reserve(rbo, false);
7655 	if (unlikely(r)) {
7656 		DRM_ERROR("failed to reserve rbo before unpin\n");
7657 		return;
7658 	}
7659 
7660 	amdgpu_bo_unpin(rbo);
7661 	amdgpu_bo_unreserve(rbo);
7662 	amdgpu_bo_unref(&rbo);
7663 }
7664 
/*
 * dm_plane_helper_check_state - validate plane viewport and scaling.
 *
 * For an enabled plane, clips the destination rectangle against the
 * CRTC mode and rejects viewports DC cannot handle, then delegates the
 * scaling check to drm_atomic_helper_check_plane_state() with min/max
 * factors derived from the DC plane caps.
 *
 * Returns 0 if the state is acceptable, -EINVAL otherwise.
 */
static int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	struct drm_framebuffer *fb = state->fb;
	int min_downscale, max_upscale;
	int min_scale = 0;
	int max_scale = INT_MAX;

	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
	if (fb && state->crtc) {
		/* Validate viewport to cover the case when only the position changes */
		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
			int viewport_width = state->crtc_w;
			int viewport_height = state->crtc_h;

			/* Shrink the viewport by whatever hangs off the
			 * left/top (negative position) or right/bottom
			 * (past the CRTC's active area) edges. */
			if (state->crtc_x < 0)
				viewport_width += state->crtc_x;
			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;

			if (state->crtc_y < 0)
				viewport_height += state->crtc_y;
			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;

			if (viewport_width < 0 || viewport_height < 0) {
				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
				return -EINVAL;
			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
				return -EINVAL;
			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
				return -EINVAL;
			}

		}

		/* Get min/max allowed scaling factors from plane caps. */
		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
					     &min_downscale, &max_upscale);
		/*
		 * Convert to drm convention: 16.16 fixed point, instead of dc's
		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
		 */
		min_scale = (1000 << 16) / max_upscale;
		max_scale = (1000 << 16) / min_downscale;
	}

	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, min_scale, max_scale, true, true);
}
7718 
7719 static int dm_plane_atomic_check(struct drm_plane *plane,
7720 				 struct drm_atomic_state *state)
7721 {
7722 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7723 										 plane);
7724 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7725 	struct dc *dc = adev->dm.dc;
7726 	struct dm_plane_state *dm_plane_state;
7727 	struct dc_scaling_info scaling_info;
7728 	struct drm_crtc_state *new_crtc_state;
7729 	int ret;
7730 
7731 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7732 
7733 	dm_plane_state = to_dm_plane_state(new_plane_state);
7734 
7735 	if (!dm_plane_state->dc_state)
7736 		return 0;
7737 
7738 	new_crtc_state =
7739 		drm_atomic_get_new_crtc_state(state,
7740 					      new_plane_state->crtc);
7741 	if (!new_crtc_state)
7742 		return -EINVAL;
7743 
7744 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7745 	if (ret)
7746 		return ret;
7747 
7748 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7749 	if (ret)
7750 		return ret;
7751 
7752 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7753 		return 0;
7754 
7755 	return -EINVAL;
7756 }
7757 
7758 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7759 				       struct drm_atomic_state *state)
7760 {
7761 	/* Only support async updates on cursor planes. */
7762 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7763 		return -EINVAL;
7764 
7765 	return 0;
7766 }
7767 
7768 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7769 					 struct drm_atomic_state *state)
7770 {
7771 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7772 									   plane);
7773 	struct drm_plane_state *old_state =
7774 		drm_atomic_get_old_plane_state(state, plane);
7775 
7776 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7777 
7778 	swap(plane->state->fb, new_state->fb);
7779 
7780 	plane->state->src_x = new_state->src_x;
7781 	plane->state->src_y = new_state->src_y;
7782 	plane->state->src_w = new_state->src_w;
7783 	plane->state->src_h = new_state->src_h;
7784 	plane->state->crtc_x = new_state->crtc_x;
7785 	plane->state->crtc_y = new_state->crtc_y;
7786 	plane->state->crtc_w = new_state->crtc_w;
7787 	plane->state->crtc_h = new_state->crtc_h;
7788 
7789 	handle_cursor_update(plane, old_state);
7790 }
7791 
/* Plane helper callbacks: FB pin/unpin plus atomic and async-cursor paths. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
7799 
7800 /*
7801  * TODO: these are currently initialized to rgb formats only.
7802  * For future use cases we should either initialize them dynamically based on
7803  * plane capabilities, or initialize this array to all formats, so internal drm
7804  * check will succeed, and let DC implement proper check
7805  */
7806 static const uint32_t rgb_formats[] = {
7807 	DRM_FORMAT_XRGB8888,
7808 	DRM_FORMAT_ARGB8888,
7809 	DRM_FORMAT_RGBA8888,
7810 	DRM_FORMAT_XRGB2101010,
7811 	DRM_FORMAT_XBGR2101010,
7812 	DRM_FORMAT_ARGB2101010,
7813 	DRM_FORMAT_ABGR2101010,
7814 	DRM_FORMAT_XRGB16161616,
7815 	DRM_FORMAT_XBGR16161616,
7816 	DRM_FORMAT_ARGB16161616,
7817 	DRM_FORMAT_ABGR16161616,
7818 	DRM_FORMAT_XBGR8888,
7819 	DRM_FORMAT_ABGR8888,
7820 	DRM_FORMAT_RGB565,
7821 };
7822 
7823 static const uint32_t overlay_formats[] = {
7824 	DRM_FORMAT_XRGB8888,
7825 	DRM_FORMAT_ARGB8888,
7826 	DRM_FORMAT_RGBA8888,
7827 	DRM_FORMAT_XBGR8888,
7828 	DRM_FORMAT_ABGR8888,
7829 	DRM_FORMAT_RGB565
7830 };
7831 
7832 static const u32 cursor_formats[] = {
7833 	DRM_FORMAT_ARGB8888
7834 };
7835 
7836 static int get_plane_formats(const struct drm_plane *plane,
7837 			     const struct dc_plane_cap *plane_cap,
7838 			     uint32_t *formats, int max_formats)
7839 {
7840 	int i, num_formats = 0;
7841 
7842 	/*
7843 	 * TODO: Query support for each group of formats directly from
7844 	 * DC plane caps. This will require adding more formats to the
7845 	 * caps list.
7846 	 */
7847 
7848 	switch (plane->type) {
7849 	case DRM_PLANE_TYPE_PRIMARY:
7850 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7851 			if (num_formats >= max_formats)
7852 				break;
7853 
7854 			formats[num_formats++] = rgb_formats[i];
7855 		}
7856 
7857 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7858 			formats[num_formats++] = DRM_FORMAT_NV12;
7859 		if (plane_cap && plane_cap->pixel_format_support.p010)
7860 			formats[num_formats++] = DRM_FORMAT_P010;
7861 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7862 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7863 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7864 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7865 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7866 		}
7867 		break;
7868 
7869 	case DRM_PLANE_TYPE_OVERLAY:
7870 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7871 			if (num_formats >= max_formats)
7872 				break;
7873 
7874 			formats[num_formats++] = overlay_formats[i];
7875 		}
7876 		break;
7877 
7878 	case DRM_PLANE_TYPE_CURSOR:
7879 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7880 			if (num_formats >= max_formats)
7881 				break;
7882 
7883 			formats[num_formats++] = cursor_formats[i];
7884 		}
7885 		break;
7886 	}
7887 
7888 	return num_formats;
7889 }
7890 
7891 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7892 				struct drm_plane *plane,
7893 				unsigned long possible_crtcs,
7894 				const struct dc_plane_cap *plane_cap)
7895 {
7896 	uint32_t formats[32];
7897 	int num_formats;
7898 	int res = -EPERM;
7899 	unsigned int supported_rotations;
7900 	uint64_t *modifiers = NULL;
7901 
7902 	num_formats = get_plane_formats(plane, plane_cap, formats,
7903 					ARRAY_SIZE(formats));
7904 
7905 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7906 	if (res)
7907 		return res;
7908 
7909 	if (modifiers == NULL)
7910 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7911 
7912 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7913 				       &dm_plane_funcs, formats, num_formats,
7914 				       modifiers, plane->type, NULL);
7915 	kfree(modifiers);
7916 	if (res)
7917 		return res;
7918 
7919 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7920 	    plane_cap && plane_cap->per_pixel_alpha) {
7921 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7922 					  BIT(DRM_MODE_BLEND_PREMULTI) |
7923 					  BIT(DRM_MODE_BLEND_COVERAGE);
7924 
7925 		drm_plane_create_alpha_property(plane);
7926 		drm_plane_create_blend_mode_property(plane, blend_caps);
7927 	}
7928 
7929 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7930 	    plane_cap &&
7931 	    (plane_cap->pixel_format_support.nv12 ||
7932 	     plane_cap->pixel_format_support.p010)) {
7933 		/* This only affects YUV formats. */
7934 		drm_plane_create_color_properties(
7935 			plane,
7936 			BIT(DRM_COLOR_YCBCR_BT601) |
7937 			BIT(DRM_COLOR_YCBCR_BT709) |
7938 			BIT(DRM_COLOR_YCBCR_BT2020),
7939 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7940 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7941 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7942 	}
7943 
7944 	supported_rotations =
7945 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7946 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7947 
7948 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7949 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7950 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7951 						   supported_rotations);
7952 
7953 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7954 
7955 	/* Create (reset) the plane state */
7956 	if (plane->funcs->reset)
7957 		plane->funcs->reset(plane);
7958 
7959 	return 0;
7960 }
7961 
7962 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7963 			       struct drm_plane *plane,
7964 			       uint32_t crtc_index)
7965 {
7966 	struct amdgpu_crtc *acrtc = NULL;
7967 	struct drm_plane *cursor_plane;
7968 
7969 	int res = -ENOMEM;
7970 
7971 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7972 	if (!cursor_plane)
7973 		goto fail;
7974 
7975 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7976 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7977 
7978 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7979 	if (!acrtc)
7980 		goto fail;
7981 
7982 	res = drm_crtc_init_with_planes(
7983 			dm->ddev,
7984 			&acrtc->base,
7985 			plane,
7986 			cursor_plane,
7987 			&amdgpu_dm_crtc_funcs, NULL);
7988 
7989 	if (res)
7990 		goto fail;
7991 
7992 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7993 
7994 	/* Create (reset) the plane state */
7995 	if (acrtc->base.funcs->reset)
7996 		acrtc->base.funcs->reset(&acrtc->base);
7997 
7998 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7999 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8000 
8001 	acrtc->crtc_id = crtc_index;
8002 	acrtc->base.enabled = false;
8003 	acrtc->otg_inst = -1;
8004 
8005 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8006 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8007 				   true, MAX_COLOR_LUT_ENTRIES);
8008 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8009 
8010 	return 0;
8011 
8012 fail:
8013 	kfree(acrtc);
8014 	kfree(cursor_plane);
8015 	return res;
8016 }
8017 
8018 
8019 static int to_drm_connector_type(enum signal_type st)
8020 {
8021 	switch (st) {
8022 	case SIGNAL_TYPE_HDMI_TYPE_A:
8023 		return DRM_MODE_CONNECTOR_HDMIA;
8024 	case SIGNAL_TYPE_EDP:
8025 		return DRM_MODE_CONNECTOR_eDP;
8026 	case SIGNAL_TYPE_LVDS:
8027 		return DRM_MODE_CONNECTOR_LVDS;
8028 	case SIGNAL_TYPE_RGB:
8029 		return DRM_MODE_CONNECTOR_VGA;
8030 	case SIGNAL_TYPE_DISPLAY_PORT:
8031 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8032 		return DRM_MODE_CONNECTOR_DisplayPort;
8033 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8034 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8035 		return DRM_MODE_CONNECTOR_DVID;
8036 	case SIGNAL_TYPE_VIRTUAL:
8037 		return DRM_MODE_CONNECTOR_VIRTUAL;
8038 
8039 	default:
8040 		return DRM_MODE_CONNECTOR_Unknown;
8041 	}
8042 }
8043 
/*
 * Return the encoder that can drive @connector, or NULL if none.
 * DM creates exactly one encoder per connector, so the first candidate
 * reported by the iterator is the right one.
 */
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}
8054 
/*
 * Cache the panel's native mode in the encoder.
 *
 * Clears amdgpu_encoder->native_mode, then copies the first entry of
 * connector->probed_modes into it if that entry is flagged preferred.
 * Note the loop breaks unconditionally after the first entry, so only
 * the head of the list is examined; callers are expected to have sorted
 * probed_modes beforehand (see amdgpu_dm_connector_ddc_get_modes()).
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	/* clock == 0 marks "no native mode known". */
	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}
8083 
8084 static struct drm_display_mode *
8085 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8086 			     char *name,
8087 			     int hdisplay, int vdisplay)
8088 {
8089 	struct drm_device *dev = encoder->dev;
8090 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8091 	struct drm_display_mode *mode = NULL;
8092 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8093 
8094 	mode = drm_mode_duplicate(dev, native_mode);
8095 
8096 	if (mode == NULL)
8097 		return NULL;
8098 
8099 	mode->hdisplay = hdisplay;
8100 	mode->vdisplay = vdisplay;
8101 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8102 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8103 
8104 	return mode;
8105 
8106 }
8107 
8108 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8109 						 struct drm_connector *connector)
8110 {
8111 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8112 	struct drm_display_mode *mode = NULL;
8113 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8114 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8115 				to_amdgpu_dm_connector(connector);
8116 	int i;
8117 	int n;
8118 	struct mode_size {
8119 		char name[DRM_DISPLAY_MODE_LEN];
8120 		int w;
8121 		int h;
8122 	} common_modes[] = {
8123 		{  "640x480",  640,  480},
8124 		{  "800x600",  800,  600},
8125 		{ "1024x768", 1024,  768},
8126 		{ "1280x720", 1280,  720},
8127 		{ "1280x800", 1280,  800},
8128 		{"1280x1024", 1280, 1024},
8129 		{ "1440x900", 1440,  900},
8130 		{"1680x1050", 1680, 1050},
8131 		{"1600x1200", 1600, 1200},
8132 		{"1920x1080", 1920, 1080},
8133 		{"1920x1200", 1920, 1200}
8134 	};
8135 
8136 	n = ARRAY_SIZE(common_modes);
8137 
8138 	for (i = 0; i < n; i++) {
8139 		struct drm_display_mode *curmode = NULL;
8140 		bool mode_existed = false;
8141 
8142 		if (common_modes[i].w > native_mode->hdisplay ||
8143 		    common_modes[i].h > native_mode->vdisplay ||
8144 		   (common_modes[i].w == native_mode->hdisplay &&
8145 		    common_modes[i].h == native_mode->vdisplay))
8146 			continue;
8147 
8148 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8149 			if (common_modes[i].w == curmode->hdisplay &&
8150 			    common_modes[i].h == curmode->vdisplay) {
8151 				mode_existed = true;
8152 				break;
8153 			}
8154 		}
8155 
8156 		if (mode_existed)
8157 			continue;
8158 
8159 		mode = amdgpu_dm_create_common_mode(encoder,
8160 				common_modes[i].name, common_modes[i].w,
8161 				common_modes[i].h);
8162 		if (!mode)
8163 			continue;
8164 
8165 		drm_mode_probed_add(connector, mode);
8166 		amdgpu_dm_connector->num_modes++;
8167 	}
8168 }
8169 
8170 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8171 {
8172 	struct drm_encoder *encoder;
8173 	struct amdgpu_encoder *amdgpu_encoder;
8174 	const struct drm_display_mode *native_mode;
8175 
8176 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8177 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8178 		return;
8179 
8180 	encoder = amdgpu_dm_connector_to_encoder(connector);
8181 	if (!encoder)
8182 		return;
8183 
8184 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8185 
8186 	native_mode = &amdgpu_encoder->native_mode;
8187 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8188 		return;
8189 
8190 	drm_connector_set_panel_orientation_with_quirk(connector,
8191 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8192 						       native_mode->hdisplay,
8193 						       native_mode->vdisplay);
8194 }
8195 
8196 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8197 					      struct edid *edid)
8198 {
8199 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8200 			to_amdgpu_dm_connector(connector);
8201 
8202 	if (edid) {
8203 		/* empty probed_modes */
8204 		INIT_LIST_HEAD(&connector->probed_modes);
8205 		amdgpu_dm_connector->num_modes =
8206 				drm_add_edid_modes(connector, edid);
8207 
8208 		/* sorting the probed modes before calling function
8209 		 * amdgpu_dm_get_native_mode() since EDID can have
8210 		 * more than one preferred mode. The modes that are
8211 		 * later in the probed mode list could be of higher
8212 		 * and preferred resolution. For example, 3840x2160
8213 		 * resolution in base EDID preferred timing and 4096x2160
8214 		 * preferred resolution in DID extension block later.
8215 		 */
8216 		drm_mode_sort(&connector->probed_modes);
8217 		amdgpu_dm_get_native_mode(connector);
8218 
8219 		/* Freesync capabilities are reset by calling
8220 		 * drm_add_edid_modes() and need to be
8221 		 * restored here.
8222 		 */
8223 		amdgpu_dm_update_freesync_caps(connector, edid);
8224 
8225 		amdgpu_set_panel_orientation(connector);
8226 	} else {
8227 		amdgpu_dm_connector->num_modes = 0;
8228 	}
8229 }
8230 
8231 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8232 			      struct drm_display_mode *mode)
8233 {
8234 	struct drm_display_mode *m;
8235 
8236 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8237 		if (drm_mode_equal(m, mode))
8238 			return true;
8239 	}
8240 
8241 	return false;
8242 }
8243 
8244 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8245 {
8246 	const struct drm_display_mode *m;
8247 	struct drm_display_mode *new_mode;
8248 	uint i;
8249 	uint32_t new_modes_count = 0;
8250 
8251 	/* Standard FPS values
8252 	 *
8253 	 * 23.976       - TV/NTSC
8254 	 * 24 	        - Cinema
8255 	 * 25 	        - TV/PAL
8256 	 * 29.97        - TV/NTSC
8257 	 * 30 	        - TV/NTSC
8258 	 * 48 	        - Cinema HFR
8259 	 * 50 	        - TV/PAL
8260 	 * 60 	        - Commonly used
8261 	 * 48,72,96,120 - Multiples of 24
8262 	 */
8263 	static const uint32_t common_rates[] = {
8264 		23976, 24000, 25000, 29970, 30000,
8265 		48000, 50000, 60000, 72000, 96000, 120000
8266 	};
8267 
8268 	/*
8269 	 * Find mode with highest refresh rate with the same resolution
8270 	 * as the preferred mode. Some monitors report a preferred mode
8271 	 * with lower resolution than the highest refresh rate supported.
8272 	 */
8273 
8274 	m = get_highest_refresh_rate_mode(aconnector, true);
8275 	if (!m)
8276 		return 0;
8277 
8278 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8279 		uint64_t target_vtotal, target_vtotal_diff;
8280 		uint64_t num, den;
8281 
8282 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8283 			continue;
8284 
8285 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8286 		    common_rates[i] > aconnector->max_vfreq * 1000)
8287 			continue;
8288 
8289 		num = (unsigned long long)m->clock * 1000 * 1000;
8290 		den = common_rates[i] * (unsigned long long)m->htotal;
8291 		target_vtotal = div_u64(num, den);
8292 		target_vtotal_diff = target_vtotal - m->vtotal;
8293 
8294 		/* Check for illegal modes */
8295 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8296 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8297 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8298 			continue;
8299 
8300 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8301 		if (!new_mode)
8302 			goto out;
8303 
8304 		new_mode->vtotal += (u16)target_vtotal_diff;
8305 		new_mode->vsync_start += (u16)target_vtotal_diff;
8306 		new_mode->vsync_end += (u16)target_vtotal_diff;
8307 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8308 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8309 
8310 		if (!is_duplicate_mode(aconnector, new_mode)) {
8311 			drm_mode_probed_add(&aconnector->base, new_mode);
8312 			new_modes_count += 1;
8313 		} else
8314 			drm_mode_destroy(aconnector->base.dev, new_mode);
8315 	}
8316  out:
8317 	return new_modes_count;
8318 }
8319 
8320 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8321 						   struct edid *edid)
8322 {
8323 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8324 		to_amdgpu_dm_connector(connector);
8325 
8326 	if (!edid)
8327 		return;
8328 
8329 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8330 		amdgpu_dm_connector->num_modes +=
8331 			add_fs_modes(amdgpu_dm_connector);
8332 }
8333 
8334 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8335 {
8336 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8337 			to_amdgpu_dm_connector(connector);
8338 	struct drm_encoder *encoder;
8339 	struct edid *edid = amdgpu_dm_connector->edid;
8340 
8341 	encoder = amdgpu_dm_connector_to_encoder(connector);
8342 
8343 	if (!drm_edid_is_valid(edid)) {
8344 		amdgpu_dm_connector->num_modes =
8345 				drm_add_modes_noedid(connector, 640, 480);
8346 	} else {
8347 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8348 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8349 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8350 	}
8351 	amdgpu_dm_fbc_init(connector);
8352 
8353 	return amdgpu_dm_connector->num_modes;
8354 }
8355 
/*
 * amdgpu_dm_connector_init_helper - common initialization for DM connectors.
 *
 * Resets the connector state, stores the dc_link, configures HPD
 * polling and YCbCr 4:2:0 support per connector type, and attaches the
 * scaling/underscan/bpc/HDR/VRR (and, when built in and a workqueue
 * exists, HDCP) properties. MST ports skip the properties guarded by
 * the mst_port checks below.
 */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD support per connector type. connector->polled
	 * defaults to 0, which means hot plug detection is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		/* Look up the link encoder currently assigned to this DP link. */
		link->link_enc = link_enc_cfg_get_link_enc(link);
		ASSERT(link->link_enc);
		if (link->link_enc)
			aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	/* MST ports skip this property. */
	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	/* ABM (backlight modulation) only when eDP and DMCU/DMUB is available. */
	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
8447 
8448 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8449 			      struct i2c_msg *msgs, int num)
8450 {
8451 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8452 	struct ddc_service *ddc_service = i2c->ddc_service;
8453 	struct i2c_command cmd;
8454 	int i;
8455 	int result = -EIO;
8456 
8457 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8458 
8459 	if (!cmd.payloads)
8460 		return result;
8461 
8462 	cmd.number_of_payloads = num;
8463 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8464 	cmd.speed = 100;
8465 
8466 	for (i = 0; i < num; i++) {
8467 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8468 		cmd.payloads[i].address = msgs[i].addr;
8469 		cmd.payloads[i].length = msgs[i].len;
8470 		cmd.payloads[i].data = msgs[i].buf;
8471 	}
8472 
8473 	if (dc_submit_i2c(
8474 			ddc_service->ctx->dc,
8475 			ddc_service->ddc_pin->hw_info.ddc_channel,
8476 			&cmd))
8477 		result = num;
8478 
8479 	kfree(cmd.payloads);
8480 	return result;
8481 }
8482 
8483 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8484 {
8485 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8486 }
8487 
/* i2c_algorithm hooks for the DM-created DDC adapters (see create_i2c()). */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
8492 
8493 static struct amdgpu_i2c_adapter *
8494 create_i2c(struct ddc_service *ddc_service,
8495 	   int link_index,
8496 	   int *res)
8497 {
8498 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8499 	struct amdgpu_i2c_adapter *i2c;
8500 
8501 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8502 	if (!i2c)
8503 		return NULL;
8504 	i2c->base.owner = THIS_MODULE;
8505 	i2c->base.class = I2C_CLASS_DDC;
8506 	i2c->base.dev.parent = &adev->pdev->dev;
8507 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8508 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8509 	i2c_set_adapdata(&i2c->base, i2c);
8510 	i2c->ddc_service = ddc_service;
8511 	if (i2c->ddc_service->ddc_pin)
8512 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8513 
8514 	return i2c;
8515 }
8516 
8517 
8518 /*
8519  * Note: this function assumes that dc_link_detect() was called for the
8520  * dc_link which will be represented by this aconnector.
8521  */
8522 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8523 				    struct amdgpu_dm_connector *aconnector,
8524 				    uint32_t link_index,
8525 				    struct amdgpu_encoder *aencoder)
8526 {
8527 	int res = 0;
8528 	int connector_type;
8529 	struct dc *dc = dm->dc;
8530 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8531 	struct amdgpu_i2c_adapter *i2c;
8532 
8533 	link->priv = aconnector;
8534 
8535 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8536 
8537 	i2c = create_i2c(link->ddc, link->link_index, &res);
8538 	if (!i2c) {
8539 		DRM_ERROR("Failed to create i2c adapter data\n");
8540 		return -ENOMEM;
8541 	}
8542 
8543 	aconnector->i2c = i2c;
8544 	res = i2c_add_adapter(&i2c->base);
8545 
8546 	if (res) {
8547 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8548 		goto out_free;
8549 	}
8550 
8551 	connector_type = to_drm_connector_type(link->connector_signal);
8552 
8553 	res = drm_connector_init_with_ddc(
8554 			dm->ddev,
8555 			&aconnector->base,
8556 			&amdgpu_dm_connector_funcs,
8557 			connector_type,
8558 			&i2c->base);
8559 
8560 	if (res) {
8561 		DRM_ERROR("connector_init failed\n");
8562 		aconnector->connector_id = -1;
8563 		goto out_free;
8564 	}
8565 
8566 	drm_connector_helper_add(
8567 			&aconnector->base,
8568 			&amdgpu_dm_connector_helper_funcs);
8569 
8570 	amdgpu_dm_connector_init_helper(
8571 		dm,
8572 		aconnector,
8573 		connector_type,
8574 		link,
8575 		link_index);
8576 
8577 	drm_connector_attach_encoder(
8578 		&aconnector->base, &aencoder->base);
8579 
8580 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8581 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8582 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8583 
8584 out_free:
8585 	if (res) {
8586 		kfree(i2c);
8587 		aconnector->i2c = NULL;
8588 	}
8589 	return res;
8590 }
8591 
8592 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8593 {
8594 	switch (adev->mode_info.num_crtc) {
8595 	case 1:
8596 		return 0x1;
8597 	case 2:
8598 		return 0x3;
8599 	case 3:
8600 		return 0x7;
8601 	case 4:
8602 		return 0xf;
8603 	case 5:
8604 		return 0x1f;
8605 	case 6:
8606 	default:
8607 		return 0x3f;
8608 	}
8609 }
8610 
8611 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8612 				  struct amdgpu_encoder *aencoder,
8613 				  uint32_t link_index)
8614 {
8615 	struct amdgpu_device *adev = drm_to_adev(dev);
8616 
8617 	int res = drm_encoder_init(dev,
8618 				   &aencoder->base,
8619 				   &amdgpu_dm_encoder_funcs,
8620 				   DRM_MODE_ENCODER_TMDS,
8621 				   NULL);
8622 
8623 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8624 
8625 	if (!res)
8626 		aencoder->encoder_id = link_index;
8627 	else
8628 		aencoder->encoder_id = -1;
8629 
8630 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8631 
8632 	return res;
8633 }
8634 
/*
 * Enable or disable the per-CRTC display interrupts (pageflip, and vline0
 * when secure display is built in) together with DRM vblank handling.
 *
 * Ordering is deliberate: on enable, vblank is turned on before taking IRQ
 * references; on disable, IRQ references are dropped before vblank is
 * turned off — the disable path is the exact mirror of the enable path.
 */
static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_get(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
	} else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_put(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}
8676 
8677 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8678 				      struct amdgpu_crtc *acrtc)
8679 {
8680 	int irq_type =
8681 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8682 
8683 	/**
8684 	 * This reads the current state for the IRQ and force reapplies
8685 	 * the setting to hardware.
8686 	 */
8687 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8688 }
8689 
8690 static bool
8691 is_scaling_state_different(const struct dm_connector_state *dm_state,
8692 			   const struct dm_connector_state *old_dm_state)
8693 {
8694 	if (dm_state->scaling != old_dm_state->scaling)
8695 		return true;
8696 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8697 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8698 			return true;
8699 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8700 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8701 			return true;
8702 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8703 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8704 		return true;
8705 	return false;
8706 }
8707 
#ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether the HDCP state machine needs to be (re)driven for this
 * connector, based on the old/new content-protection properties.
 *
 * NOTE: this intentionally has side effects — it rewrites
 * state->content_protection for several transitions (e.g. folding
 * ENABLED->DESIRED back to ENABLED, or downgrading UNDESIRED->ENABLED to
 * DESIRED on S3 resume) and may clear dm_con_state->update_hdcp.
 *
 * Returns true when HDCP should be started/re-queried for this connector.
 */
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
	 *
	 * Handles:	UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Stream removed and re-enabled
	 *
	 * Can sometimes overlap with the HPD case,
	 * thus set update_hdcp to false to avoid
	 * setting HDCP multiple times.
	 *
	 * Handles:	DESIRED -> DESIRED (Special case)
	 */
	if (!(old_state->crtc && old_state->crtc->enabled) &&
		state->crtc && state->crtc->enabled &&
		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/* Hot-plug, headless s3, dpms
	 *
	 * Only start HDCP if the display is connected/enabled.
	 * update_hdcp flag will be set to false until the next
	 * HPD comes in.
	 *
	 * Handles:	DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles:	UNDESIRED -> UNDESIRED
	 *		DESIRED -> DESIRED
	 *		ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles:	UNDESIRED -> DESIRED
	 *		DESIRED -> UNDESIRED
	 *		ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles:	DESIRED -> ENABLED
	 */
	return false;
}

#endif
8793 static void remove_stream(struct amdgpu_device *adev,
8794 			  struct amdgpu_crtc *acrtc,
8795 			  struct dc_stream_state *stream)
8796 {
8797 	/* this is the update mode case */
8798 
8799 	acrtc->otg_inst = -1;
8800 	acrtc->enabled = false;
8801 }
8802 
8803 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8804 			       struct dc_cursor_position *position)
8805 {
8806 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8807 	int x, y;
8808 	int xorigin = 0, yorigin = 0;
8809 
8810 	if (!crtc || !plane->state->fb)
8811 		return 0;
8812 
8813 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8814 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8815 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8816 			  __func__,
8817 			  plane->state->crtc_w,
8818 			  plane->state->crtc_h);
8819 		return -EINVAL;
8820 	}
8821 
8822 	x = plane->state->crtc_x;
8823 	y = plane->state->crtc_y;
8824 
8825 	if (x <= -amdgpu_crtc->max_cursor_width ||
8826 	    y <= -amdgpu_crtc->max_cursor_height)
8827 		return 0;
8828 
8829 	if (x < 0) {
8830 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8831 		x = 0;
8832 	}
8833 	if (y < 0) {
8834 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8835 		y = 0;
8836 	}
8837 	position->enable = true;
8838 	position->translate_by_source = true;
8839 	position->x = x;
8840 	position->y = y;
8841 	position->x_hotspot = xorigin;
8842 	position->y_hotspot = yorigin;
8843 
8844 	return 0;
8845 }
8846 
/*
 * Program the hardware cursor for a cursor-plane update: either disables
 * the cursor (no position to enable) or pushes fresh attributes and a
 * position to DC under the dc_lock.
 */
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	/* afb is NULL when the cursor FB is being removed. */
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position = {0};
	struct dc_cursor_attributes attributes;
	int ret;

	/* Nothing to do if there was no cursor FB before or after. */
	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
		      __func__,
		      amdgpu_crtc->crtc_id,
		      plane->state->crtc_w,
		      plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	/* position.enable implies plane->state->fb (and thus afb) is non-NULL,
	 * so the afb dereferences below are safe.
	 */
	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	/* Pitch in pixels, not bytes. */
	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
8910 
8911 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8912 {
8913 
8914 	assert_spin_locked(&acrtc->base.dev->event_lock);
8915 	WARN_ON(acrtc->event);
8916 
8917 	acrtc->event = acrtc->base.state->event;
8918 
8919 	/* Set the flip status */
8920 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8921 
8922 	/* Mark this event as consumed */
8923 	acrtc->base.state->event = NULL;
8924 
8925 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8926 		     acrtc->crtc_id);
8927 }
8928 
8929 static void update_freesync_state_on_stream(
8930 	struct amdgpu_display_manager *dm,
8931 	struct dm_crtc_state *new_crtc_state,
8932 	struct dc_stream_state *new_stream,
8933 	struct dc_plane_state *surface,
8934 	u32 flip_timestamp_in_us)
8935 {
8936 	struct mod_vrr_params vrr_params;
8937 	struct dc_info_packet vrr_infopacket = {0};
8938 	struct amdgpu_device *adev = dm->adev;
8939 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8940 	unsigned long flags;
8941 	bool pack_sdp_v1_3 = false;
8942 
8943 	if (!new_stream)
8944 		return;
8945 
8946 	/*
8947 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8948 	 * For now it's sufficient to just guard against these conditions.
8949 	 */
8950 
8951 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8952 		return;
8953 
8954 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8955         vrr_params = acrtc->dm_irq_params.vrr_params;
8956 
8957 	if (surface) {
8958 		mod_freesync_handle_preflip(
8959 			dm->freesync_module,
8960 			surface,
8961 			new_stream,
8962 			flip_timestamp_in_us,
8963 			&vrr_params);
8964 
8965 		if (adev->family < AMDGPU_FAMILY_AI &&
8966 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8967 			mod_freesync_handle_v_update(dm->freesync_module,
8968 						     new_stream, &vrr_params);
8969 
8970 			/* Need to call this before the frame ends. */
8971 			dc_stream_adjust_vmin_vmax(dm->dc,
8972 						   new_crtc_state->stream,
8973 						   &vrr_params.adjust);
8974 		}
8975 	}
8976 
8977 	mod_freesync_build_vrr_infopacket(
8978 		dm->freesync_module,
8979 		new_stream,
8980 		&vrr_params,
8981 		PACKET_TYPE_VRR,
8982 		TRANSFER_FUNC_UNKNOWN,
8983 		&vrr_infopacket,
8984 		pack_sdp_v1_3);
8985 
8986 	new_crtc_state->freesync_timing_changed |=
8987 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8988 			&vrr_params.adjust,
8989 			sizeof(vrr_params.adjust)) != 0);
8990 
8991 	new_crtc_state->freesync_vrr_info_changed |=
8992 		(memcmp(&new_crtc_state->vrr_infopacket,
8993 			&vrr_infopacket,
8994 			sizeof(vrr_infopacket)) != 0);
8995 
8996 	acrtc->dm_irq_params.vrr_params = vrr_params;
8997 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8998 
8999 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9000 	new_stream->vrr_infopacket = vrr_infopacket;
9001 
9002 	if (new_crtc_state->freesync_vrr_info_changed)
9003 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9004 			      new_crtc_state->base.crtc->base.id,
9005 			      (int)new_crtc_state->base.vrr_enabled,
9006 			      (int)vrr_params.state);
9007 
9008 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9009 }
9010 
/*
 * Resolve the effective freesync/VRR state for a CRTC's stream and copy
 * the results into the per-CRTC IRQ parameters so the interrupt handlers
 * see a consistent snapshot (all under the event_lock).
 */
static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		/*
		 * if freesync compatible mode was set, config.state will be set
		 * in atomic check
		 */
		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
		} else {
			/* Fall back to the user's vrr_enabled property. */
			config.state = new_crtc_state->base.vrr_enabled ?
						     VRR_STATE_ACTIVE_VARIABLE :
						     VRR_STATE_INACTIVE;
		}
	} else {
		/* No VRR support or no usable refresh range. */
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
9073 
9074 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9075 					    struct dm_crtc_state *new_state)
9076 {
9077 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9078 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9079 
9080 	if (!old_vrr_active && new_vrr_active) {
9081 		/* Transition VRR inactive -> active:
9082 		 * While VRR is active, we must not disable vblank irq, as a
9083 		 * reenable after disable would compute bogus vblank/pflip
9084 		 * timestamps if it likely happened inside display front-porch.
9085 		 *
9086 		 * We also need vupdate irq for the actual core vblank handling
9087 		 * at end of vblank.
9088 		 */
9089 		dm_set_vupdate_irq(new_state->base.crtc, true);
9090 		drm_crtc_vblank_get(new_state->base.crtc);
9091 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9092 				 __func__, new_state->base.crtc->base.id);
9093 	} else if (old_vrr_active && !new_vrr_active) {
9094 		/* Transition VRR active -> inactive:
9095 		 * Allow vblank irq disable again for fixed refresh rate.
9096 		 */
9097 		dm_set_vupdate_irq(new_state->base.crtc, false);
9098 		drm_crtc_vblank_put(new_state->base.crtc);
9099 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9100 				 __func__, new_state->base.crtc->base.id);
9101 	}
9102 }
9103 
9104 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9105 {
9106 	struct drm_plane *plane;
9107 	struct drm_plane_state *old_plane_state;
9108 	int i;
9109 
9110 	/*
9111 	 * TODO: Make this per-stream so we don't issue redundant updates for
9112 	 * commits with multiple streams.
9113 	 */
9114 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9115 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9116 			handle_cursor_update(plane, old_plane_state);
9117 }
9118 
9119 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9120 				    struct dc_state *dc_state,
9121 				    struct drm_device *dev,
9122 				    struct amdgpu_display_manager *dm,
9123 				    struct drm_crtc *pcrtc,
9124 				    bool wait_for_vblank)
9125 {
9126 	uint32_t i;
9127 	uint64_t timestamp_ns;
9128 	struct drm_plane *plane;
9129 	struct drm_plane_state *old_plane_state, *new_plane_state;
9130 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9131 	struct drm_crtc_state *new_pcrtc_state =
9132 			drm_atomic_get_new_crtc_state(state, pcrtc);
9133 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9134 	struct dm_crtc_state *dm_old_crtc_state =
9135 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9136 	int planes_count = 0, vpos, hpos;
9137 	long r;
9138 	unsigned long flags;
9139 	struct amdgpu_bo *abo;
9140 	uint32_t target_vblank, last_flip_vblank;
9141 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9142 	bool pflip_present = false;
9143 	struct {
9144 		struct dc_surface_update surface_updates[MAX_SURFACES];
9145 		struct dc_plane_info plane_infos[MAX_SURFACES];
9146 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9147 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9148 		struct dc_stream_update stream_update;
9149 	} *bundle;
9150 
9151 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9152 
9153 	if (!bundle) {
9154 		dm_error("Failed to allocate update bundle\n");
9155 		goto cleanup;
9156 	}
9157 
9158 	/*
9159 	 * Disable the cursor first if we're disabling all the planes.
9160 	 * It'll remain on the screen after the planes are re-enabled
9161 	 * if we don't.
9162 	 */
9163 	if (acrtc_state->active_planes == 0)
9164 		amdgpu_dm_commit_cursors(state);
9165 
9166 	/* update planes when needed */
9167 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9168 		struct drm_crtc *crtc = new_plane_state->crtc;
9169 		struct drm_crtc_state *new_crtc_state;
9170 		struct drm_framebuffer *fb = new_plane_state->fb;
9171 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9172 		bool plane_needs_flip;
9173 		struct dc_plane_state *dc_plane;
9174 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9175 
9176 		/* Cursor plane is handled after stream updates */
9177 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9178 			continue;
9179 
9180 		if (!fb || !crtc || pcrtc != crtc)
9181 			continue;
9182 
9183 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9184 		if (!new_crtc_state->active)
9185 			continue;
9186 
9187 		dc_plane = dm_new_plane_state->dc_state;
9188 
9189 		bundle->surface_updates[planes_count].surface = dc_plane;
9190 		if (new_pcrtc_state->color_mgmt_changed) {
9191 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9192 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9193 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9194 		}
9195 
9196 		fill_dc_scaling_info(dm->adev, new_plane_state,
9197 				     &bundle->scaling_infos[planes_count]);
9198 
9199 		bundle->surface_updates[planes_count].scaling_info =
9200 			&bundle->scaling_infos[planes_count];
9201 
9202 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9203 
9204 		pflip_present = pflip_present || plane_needs_flip;
9205 
9206 		if (!plane_needs_flip) {
9207 			planes_count += 1;
9208 			continue;
9209 		}
9210 
9211 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9212 
9213 		/*
9214 		 * Wait for all fences on this FB. Do limited wait to avoid
9215 		 * deadlock during GPU reset when this fence will not signal
9216 		 * but we hold reservation lock for the BO.
9217 		 */
9218 		r = dma_resv_wait_timeout(abo->tbo.base.resv,
9219 					  DMA_RESV_USAGE_WRITE, false,
9220 					  msecs_to_jiffies(5000));
9221 		if (unlikely(r <= 0))
9222 			DRM_ERROR("Waiting for fences timed out!");
9223 
9224 		fill_dc_plane_info_and_addr(
9225 			dm->adev, new_plane_state,
9226 			afb->tiling_flags,
9227 			&bundle->plane_infos[planes_count],
9228 			&bundle->flip_addrs[planes_count].address,
9229 			afb->tmz_surface, false);
9230 
9231 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9232 				 new_plane_state->plane->index,
9233 				 bundle->plane_infos[planes_count].dcc.enable);
9234 
9235 		bundle->surface_updates[planes_count].plane_info =
9236 			&bundle->plane_infos[planes_count];
9237 
9238 		/*
9239 		 * Only allow immediate flips for fast updates that don't
9240 		 * change FB pitch, DCC state, rotation or mirroing.
9241 		 */
9242 		bundle->flip_addrs[planes_count].flip_immediate =
9243 			crtc->state->async_flip &&
9244 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9245 
9246 		timestamp_ns = ktime_get_ns();
9247 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9248 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9249 		bundle->surface_updates[planes_count].surface = dc_plane;
9250 
9251 		if (!bundle->surface_updates[planes_count].surface) {
9252 			DRM_ERROR("No surface for CRTC: id=%d\n",
9253 					acrtc_attach->crtc_id);
9254 			continue;
9255 		}
9256 
9257 		if (plane == pcrtc->primary)
9258 			update_freesync_state_on_stream(
9259 				dm,
9260 				acrtc_state,
9261 				acrtc_state->stream,
9262 				dc_plane,
9263 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9264 
9265 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9266 				 __func__,
9267 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9268 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9269 
9270 		planes_count += 1;
9271 
9272 	}
9273 
9274 	if (pflip_present) {
9275 		if (!vrr_active) {
9276 			/* Use old throttling in non-vrr fixed refresh rate mode
9277 			 * to keep flip scheduling based on target vblank counts
9278 			 * working in a backwards compatible way, e.g., for
9279 			 * clients using the GLX_OML_sync_control extension or
9280 			 * DRI3/Present extension with defined target_msc.
9281 			 */
9282 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9283 		}
9284 		else {
9285 			/* For variable refresh rate mode only:
9286 			 * Get vblank of last completed flip to avoid > 1 vrr
9287 			 * flips per video frame by use of throttling, but allow
9288 			 * flip programming anywhere in the possibly large
9289 			 * variable vrr vblank interval for fine-grained flip
9290 			 * timing control and more opportunity to avoid stutter
9291 			 * on late submission of flips.
9292 			 */
9293 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9294 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9295 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9296 		}
9297 
9298 		target_vblank = last_flip_vblank + wait_for_vblank;
9299 
9300 		/*
9301 		 * Wait until we're out of the vertical blank period before the one
9302 		 * targeted by the flip
9303 		 */
9304 		while ((acrtc_attach->enabled &&
9305 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9306 							    0, &vpos, &hpos, NULL,
9307 							    NULL, &pcrtc->hwmode)
9308 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9309 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9310 			(int)(target_vblank -
9311 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9312 			usleep_range(1000, 1100);
9313 		}
9314 
9315 		/**
9316 		 * Prepare the flip event for the pageflip interrupt to handle.
9317 		 *
9318 		 * This only works in the case where we've already turned on the
9319 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9320 		 * from 0 -> n planes we have to skip a hardware generated event
9321 		 * and rely on sending it from software.
9322 		 */
9323 		if (acrtc_attach->base.state->event &&
9324 		    acrtc_state->active_planes > 0 &&
9325 		    !acrtc_state->force_dpms_off) {
9326 			drm_crtc_vblank_get(pcrtc);
9327 
9328 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9329 
9330 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9331 			prepare_flip_isr(acrtc_attach);
9332 
9333 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9334 		}
9335 
9336 		if (acrtc_state->stream) {
9337 			if (acrtc_state->freesync_vrr_info_changed)
9338 				bundle->stream_update.vrr_infopacket =
9339 					&acrtc_state->stream->vrr_infopacket;
9340 		}
9341 	}
9342 
9343 	/* Update the planes if changed or disable if we don't have any. */
9344 	if ((planes_count || acrtc_state->active_planes == 0) &&
9345 		acrtc_state->stream) {
9346 		/*
9347 		 * If PSR or idle optimizations are enabled then flush out
9348 		 * any pending work before hardware programming.
9349 		 */
9350 		if (dm->vblank_control_workqueue)
9351 			flush_workqueue(dm->vblank_control_workqueue);
9352 
9353 		bundle->stream_update.stream = acrtc_state->stream;
9354 		if (new_pcrtc_state->mode_changed) {
9355 			bundle->stream_update.src = acrtc_state->stream->src;
9356 			bundle->stream_update.dst = acrtc_state->stream->dst;
9357 		}
9358 
9359 		if (new_pcrtc_state->color_mgmt_changed) {
9360 			/*
9361 			 * TODO: This isn't fully correct since we've actually
9362 			 * already modified the stream in place.
9363 			 */
9364 			bundle->stream_update.gamut_remap =
9365 				&acrtc_state->stream->gamut_remap_matrix;
9366 			bundle->stream_update.output_csc_transform =
9367 				&acrtc_state->stream->csc_color_matrix;
9368 			bundle->stream_update.out_transfer_func =
9369 				acrtc_state->stream->out_transfer_func;
9370 		}
9371 
9372 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9373 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9374 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9375 
9376 		/*
9377 		 * If FreeSync state on the stream has changed then we need to
9378 		 * re-adjust the min/max bounds now that DC doesn't handle this
9379 		 * as part of commit.
9380 		 */
9381 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9382 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9383 			dc_stream_adjust_vmin_vmax(
9384 				dm->dc, acrtc_state->stream,
9385 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9386 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9387 		}
9388 		mutex_lock(&dm->dc_lock);
9389 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9390 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9391 			amdgpu_dm_psr_disable(acrtc_state->stream);
9392 
9393 		dc_commit_updates_for_stream(dm->dc,
9394 						     bundle->surface_updates,
9395 						     planes_count,
9396 						     acrtc_state->stream,
9397 						     &bundle->stream_update,
9398 						     dc_state);
9399 
9400 		/**
9401 		 * Enable or disable the interrupts on the backend.
9402 		 *
9403 		 * Most pipes are put into power gating when unused.
9404 		 *
9405 		 * When power gating is enabled on a pipe we lose the
9406 		 * interrupt enablement state when power gating is disabled.
9407 		 *
9408 		 * So we need to update the IRQ control state in hardware
9409 		 * whenever the pipe turns on (since it could be previously
9410 		 * power gated) or off (since some pipes can't be power gated
9411 		 * on some ASICs).
9412 		 */
9413 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9414 			dm_update_pflip_irq_state(drm_to_adev(dev),
9415 						  acrtc_attach);
9416 
9417 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9418 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9419 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9420 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9421 
9422 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9423 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9424 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9425 			struct amdgpu_dm_connector *aconn =
9426 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9427 
9428 			if (aconn->psr_skip_count > 0)
9429 				aconn->psr_skip_count--;
9430 
9431 			/* Allow PSR when skip count is 0. */
9432 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9433 		} else {
9434 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9435 		}
9436 
9437 		mutex_unlock(&dm->dc_lock);
9438 	}
9439 
9440 	/*
9441 	 * Update cursor state *after* programming all the planes.
9442 	 * This avoids redundant programming in the case where we're going
9443 	 * to be disabling a single plane - those pipes are being disabled.
9444 	 */
9445 	if (acrtc_state->active_planes)
9446 		amdgpu_dm_commit_cursors(state);
9447 
9448 cleanup:
9449 	kfree(bundle);
9450 }
9451 
/*
 * amdgpu_dm_commit_audio - propagate audio endpoint changes for a commit.
 *
 * Walks the connectors touched by @state and sends ELD notifications for
 * audio endpoints that went away (connector lost or changed its CRTC) and
 * for endpoints that came up on a newly modeset CRTC with an active stream.
 */
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		/* Connectors that keep their CRTC without a modeset are unaffected. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		/* -1 marks the connector as having no audio instance. */
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		/* Notify with the old instance so the endpoint is torn down. */
		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		/* No stream status means the stream is not live; nothing to add. */
		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}
9526 
9527 /*
9528  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9529  * @crtc_state: the DRM CRTC state
9530  * @stream_state: the DC stream state.
9531  *
9532  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9533  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9534  */
9535 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9536 						struct dc_stream_state *stream_state)
9537 {
9538 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9539 }
9540 
9541 /**
9542  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9543  * @state: The atomic state to commit
9544  *
9545  * This will tell DC to commit the constructed DC state from atomic_check,
9546  * programming the hardware. Any failures here implies a hardware failure, since
9547  * atomic check should have filtered anything non-kosher.
9548  */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;
	bool mode_set_reset_required = false;

	trace_amdgpu_dm_atomic_commit_tail_begin(state);

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	/*
	 * Disable interrupts and drop the stream reference for every CRTC
	 * that is turning off or going through a full modeset, before any
	 * hardware programming happens below.
	 */
	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
				       new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, false);
			dc_stream_release(dm_old_crtc_state->stream);
		}
	}

	drm_atomic_helper_calc_timestamping_constants(state);

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		drm_dbg_state(state->dev,
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Disable cursor if disabling crtc */
		if (old_crtc_state->active && !new_crtc_state->active) {
			struct dc_cursor_position position;

			/* Zeroed position with enable=false hides the cursor. */
			memset(&position, 0, sizeof(position));
			mutex_lock(&dm->dc_lock);
			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
			mutex_unlock(&dm->dc_lock);
		}

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * this could happen because of issues with
				 * userspace notifications delivery.
				 * In this case userspace tries to set mode on
				 * display which is disconnected in fact.
				 * dc_sink is NULL in this case on aconnector.
				 * We expect reset mode will come soon.
				 *
				 * This can also happen when unplug is done
				 * during resume sequence ended
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						__func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			/*
			 * Take a runtime PM reference while the CRTC is lit;
			 * it is dropped via crtc_disable_count at the end of
			 * the commit when the CRTC is disabled.
			 */
			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
			mode_set_reset_required = true;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			mode_set_reset_required = true;
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		/* if there mode set or reset, disable eDP PSR */
		if (mode_set_reset_required) {
			/* Flush deferred vblank work before disabling PSR. */
			if (dm->vblank_control_workqueue)
				flush_workqueue(dm->vblank_control_workqueue);

			amdgpu_dm_psr_disable_all(dm);
		}

		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
		mutex_unlock(&dm->dc_lock);
	}

	/* Cache the primary OTG instance for each CRTC that has a stream. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Re-evaluate HDCP for every connector touched by this commit. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		/*
		 * Stream went away while protection was enabled: reset HDCP on
		 * the link and fall back to DESIRED so it re-enables later.
		 */
		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);

		/* Nothing relevant changed on this connector; skip the update. */
		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);

		if (WARN_ON(!status))
			continue;

		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
						     dummy_updates,
						     status->plane_count,
						     dm_new_crtc_state->stream,
						     &stream_update,
						     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* For freesync config update on crtc state and params for irq */
		update_stream_irq_parameters(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/**
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. It was intentionally deferred until after the front end
	 * state was modified to wait until the OTG was on and so the IRQ
	 * handlers didn't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
		bool configure_crc = false;
		enum amdgpu_dm_pipe_crc_source cur_crc_src;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
#endif
		/* Snapshot the CRC source under the event lock. */
		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
		cur_crc_src = acrtc->dm_irq_params.crc_src;
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			/* Balanced by dc_stream_release in the disable path. */
			dc_stream_retain(dm_new_crtc_state->stream);
			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
			manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
			/**
			 * Frontend may have changed so reapply the CRC capture
			 * settings for the stream.
			 */
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
				configure_crc = true;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
				if (amdgpu_dm_crc_window_is_activated(crtc)) {
					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
					acrtc->dm_irq_params.crc_window.update_win = true;
					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
					crc_rd_wrk->crtc = crtc;
					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
				}
#endif
			}

			if (configure_crc)
				if (amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state, cur_crc_src))
					DRM_DEBUG_DRIVER("Failed to configure crc source");
#endif
		}
	}

	/* Any async flip in this commit means we must not block on vblank. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc*/
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
	/* restore the backlight level */
	for (i = 0; i < dm->num_of_edps; i++) {
		if (dm->backlight_dev[i] &&
		    (dm->actual_brightness[i] != dm->brightness[i]))
			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
	}
#endif
	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/* return the stolen vga memory back to VRAM */
	if (!adev->mman.keep_stolen_vga_memory)
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}
9969 
9970 
9971 static int dm_force_atomic_commit(struct drm_connector *connector)
9972 {
9973 	int ret = 0;
9974 	struct drm_device *ddev = connector->dev;
9975 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9976 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9977 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9978 	struct drm_connector_state *conn_state;
9979 	struct drm_crtc_state *crtc_state;
9980 	struct drm_plane_state *plane_state;
9981 
9982 	if (!state)
9983 		return -ENOMEM;
9984 
9985 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9986 
9987 	/* Construct an atomic state to restore previous display setting */
9988 
9989 	/*
9990 	 * Attach connectors to drm_atomic_state
9991 	 */
9992 	conn_state = drm_atomic_get_connector_state(state, connector);
9993 
9994 	ret = PTR_ERR_OR_ZERO(conn_state);
9995 	if (ret)
9996 		goto out;
9997 
9998 	/* Attach crtc to drm_atomic_state*/
9999 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10000 
10001 	ret = PTR_ERR_OR_ZERO(crtc_state);
10002 	if (ret)
10003 		goto out;
10004 
10005 	/* force a restore */
10006 	crtc_state->mode_changed = true;
10007 
10008 	/* Attach plane to drm_atomic_state */
10009 	plane_state = drm_atomic_get_plane_state(state, plane);
10010 
10011 	ret = PTR_ERR_OR_ZERO(plane_state);
10012 	if (ret)
10013 		goto out;
10014 
10015 	/* Call commit internally with the state we just constructed */
10016 	ret = drm_atomic_commit(state);
10017 
10018 out:
10019 	drm_atomic_state_put(state);
10020 	if (ret)
10021 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10022 
10023 	return ret;
10024 }
10025 
10026 /*
10027  * This function handles all cases when set mode does not come upon hotplug.
10028  * This includes when a display is unplugged then plugged back into the
10029  * same port and when running without usermode desktop manager supprot
10030  */
10031 void dm_restore_drm_connector_state(struct drm_device *dev,
10032 				    struct drm_connector *connector)
10033 {
10034 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10035 	struct amdgpu_crtc *disconnected_acrtc;
10036 	struct dm_crtc_state *acrtc_state;
10037 
10038 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10039 		return;
10040 
10041 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10042 	if (!disconnected_acrtc)
10043 		return;
10044 
10045 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10046 	if (!acrtc_state->stream)
10047 		return;
10048 
10049 	/*
10050 	 * If the previous sink is not released and different from the current,
10051 	 * we deduce we are in a state where we can not rely on usermode call
10052 	 * to turn on the display, so we do it here
10053 	 */
10054 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10055 		dm_force_atomic_commit(&aconnector->base);
10056 }
10057 
10058 /*
10059  * Grabs all modesetting locks to serialize against any blocking commits,
10060  * Waits for completion of all non blocking commits.
10061  */
10062 static int do_aquire_global_lock(struct drm_device *dev,
10063 				 struct drm_atomic_state *state)
10064 {
10065 	struct drm_crtc *crtc;
10066 	struct drm_crtc_commit *commit;
10067 	long ret;
10068 
10069 	/*
10070 	 * Adding all modeset locks to aquire_ctx will
10071 	 * ensure that when the framework release it the
10072 	 * extra locks we are locking here will get released to
10073 	 */
10074 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10075 	if (ret)
10076 		return ret;
10077 
10078 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10079 		spin_lock(&crtc->commit_lock);
10080 		commit = list_first_entry_or_null(&crtc->commit_list,
10081 				struct drm_crtc_commit, commit_entry);
10082 		if (commit)
10083 			drm_crtc_commit_get(commit);
10084 		spin_unlock(&crtc->commit_lock);
10085 
10086 		if (!commit)
10087 			continue;
10088 
10089 		/*
10090 		 * Make sure all pending HW programming completed and
10091 		 * page flips done
10092 		 */
10093 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10094 
10095 		if (ret > 0)
10096 			ret = wait_for_completion_interruptible_timeout(
10097 					&commit->flip_done, 10*HZ);
10098 
10099 		if (ret == 0)
10100 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10101 				  "timed out\n", crtc->base.id, crtc->name);
10102 
10103 		drm_crtc_commit_put(commit);
10104 	}
10105 
10106 	return ret < 0 ? ret : 0;
10107 }
10108 
10109 static void get_freesync_config_for_crtc(
10110 	struct dm_crtc_state *new_crtc_state,
10111 	struct dm_connector_state *new_con_state)
10112 {
10113 	struct mod_freesync_config config = {0};
10114 	struct amdgpu_dm_connector *aconnector =
10115 			to_amdgpu_dm_connector(new_con_state->base.connector);
10116 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10117 	int vrefresh = drm_mode_vrefresh(mode);
10118 	bool fs_vid_mode = false;
10119 
10120 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10121 					vrefresh >= aconnector->min_vfreq &&
10122 					vrefresh <= aconnector->max_vfreq;
10123 
10124 	if (new_crtc_state->vrr_supported) {
10125 		new_crtc_state->stream->ignore_msa_timing_param = true;
10126 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10127 
10128 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10129 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10130 		config.vsif_supported = true;
10131 		config.btr = true;
10132 
10133 		if (fs_vid_mode) {
10134 			config.state = VRR_STATE_ACTIVE_FIXED;
10135 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10136 			goto out;
10137 		} else if (new_crtc_state->base.vrr_enabled) {
10138 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10139 		} else {
10140 			config.state = VRR_STATE_INACTIVE;
10141 		}
10142 	}
10143 out:
10144 	new_crtc_state->freesync_config = config;
10145 }
10146 
10147 static void reset_freesync_config_for_crtc(
10148 	struct dm_crtc_state *new_crtc_state)
10149 {
10150 	new_crtc_state->vrr_supported = false;
10151 
10152 	memset(&new_crtc_state->vrr_infopacket, 0,
10153 	       sizeof(new_crtc_state->vrr_infopacket));
10154 }
10155 
10156 static bool
10157 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10158 				 struct drm_crtc_state *new_crtc_state)
10159 {
10160 	const struct drm_display_mode *old_mode, *new_mode;
10161 
10162 	if (!old_crtc_state || !new_crtc_state)
10163 		return false;
10164 
10165 	old_mode = &old_crtc_state->mode;
10166 	new_mode = &new_crtc_state->mode;
10167 
10168 	if (old_mode->clock       == new_mode->clock &&
10169 	    old_mode->hdisplay    == new_mode->hdisplay &&
10170 	    old_mode->vdisplay    == new_mode->vdisplay &&
10171 	    old_mode->htotal      == new_mode->htotal &&
10172 	    old_mode->vtotal      != new_mode->vtotal &&
10173 	    old_mode->hsync_start == new_mode->hsync_start &&
10174 	    old_mode->vsync_start != new_mode->vsync_start &&
10175 	    old_mode->hsync_end   == new_mode->hsync_end &&
10176 	    old_mode->vsync_end   != new_mode->vsync_end &&
10177 	    old_mode->hskew       == new_mode->hskew &&
10178 	    old_mode->vscan       == new_mode->vscan &&
10179 	    (old_mode->vsync_end - old_mode->vsync_start) ==
10180 	    (new_mode->vsync_end - new_mode->vsync_start))
10181 		return true;
10182 
10183 	return false;
10184 }
10185 
10186 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10187 	uint64_t num, den, res;
10188 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10189 
10190 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10191 
10192 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10193 	den = (unsigned long long)new_crtc_state->mode.htotal *
10194 	      (unsigned long long)new_crtc_state->mode.vtotal;
10195 
10196 	res = div_u64(num, den);
10197 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10198 }
10199 
/**
 * dm_update_crtc_state() - Build or tear down the dc stream backing a CRTC
 * @dm: display manager owning the dc context
 * @state: overall atomic state being checked
 * @crtc: CRTC whose stream is being updated
 * @old_crtc_state: CRTC state being transitioned away from
 * @new_crtc_state: CRTC state being transitioned to
 * @enable: false on the "disable" pass (streams are removed from the dc
 *          context), true on the "enable" pass (streams are created/added)
 * @lock_and_validation_needed: set to true when this change requires a full
 *          dc validation under the global lock
 *
 * Called twice per CRTC from atomic check: first with @enable == false to
 * remove the stream of any changed/disabled CRTC from the dc context, then
 * with @enable == true to create and add new streams. Stream-level updates
 * that do not require a full modeset (scaling, ABM, color management,
 * freesync) are applied at the end for active, enabled CRTCs.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
			 struct drm_atomic_state *state,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *old_crtc_state,
			 struct drm_crtc_state *new_crtc_state,
			 bool enable,
			 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
							    &aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
							    &aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		/*
		 * Validate and create the candidate stream now; if we end up
		 * skipping the modeset below the extra reference is dropped
		 * at skip_modeset.
		 */
		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
			goto skip_modeset;

		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	drm_dbg_state(state->dev,
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		/*
		 * Only a front-porch (vfront) change: keep the stream and
		 * switch freesync to a fixed refresh rate instead of doing
		 * a full modeset.
		 */
		if (dm_new_crtc_state->stream &&
		    is_timing_unchanged_for_freesync(new_crtc_state,
						     old_crtc_state)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER(
				"Mode change not required for front porch change, "
				"setting mode_changed to %d",
				new_crtc_state->mode_changed);

			set_freesync_fixed_config(dm_new_crtc_state);

			goto skip_modeset;
		} else if (aconnector &&
			   is_freesync_video_mode(&new_crtc_state->mode,
						  aconnector)) {
			struct drm_display_mode *high_mode;

			high_mode = get_highest_refresh_rate_mode(aconnector, false);
			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
				set_freesync_fixed_config(dm_new_crtc_state);
			}
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors not found in existing crtc_state in the chained mode
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			/*
			 * The CRTC state takes its own reference on the
			 * stream; the create_validate reference is dropped
			 * at skip_modeset.
			 */
			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		 dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
				drm_atomic_crtc_needs_modeset(new_crtc_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
10463 
10464 static bool should_reset_plane(struct drm_atomic_state *state,
10465 			       struct drm_plane *plane,
10466 			       struct drm_plane_state *old_plane_state,
10467 			       struct drm_plane_state *new_plane_state)
10468 {
10469 	struct drm_plane *other;
10470 	struct drm_plane_state *old_other_state, *new_other_state;
10471 	struct drm_crtc_state *new_crtc_state;
10472 	int i;
10473 
10474 	/*
10475 	 * TODO: Remove this hack once the checks below are sufficient
10476 	 * enough to determine when we need to reset all the planes on
10477 	 * the stream.
10478 	 */
10479 	if (state->allow_modeset)
10480 		return true;
10481 
10482 	/* Exit early if we know that we're adding or removing the plane. */
10483 	if (old_plane_state->crtc != new_plane_state->crtc)
10484 		return true;
10485 
10486 	/* old crtc == new_crtc == NULL, plane not in context. */
10487 	if (!new_plane_state->crtc)
10488 		return false;
10489 
10490 	new_crtc_state =
10491 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10492 
10493 	if (!new_crtc_state)
10494 		return true;
10495 
10496 	/* CRTC Degamma changes currently require us to recreate planes. */
10497 	if (new_crtc_state->color_mgmt_changed)
10498 		return true;
10499 
10500 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10501 		return true;
10502 
10503 	/*
10504 	 * If there are any new primary or overlay planes being added or
10505 	 * removed then the z-order can potentially change. To ensure
10506 	 * correct z-order and pipe acquisition the current DC architecture
10507 	 * requires us to remove and recreate all existing planes.
10508 	 *
10509 	 * TODO: Come up with a more elegant solution for this.
10510 	 */
10511 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10512 		struct amdgpu_framebuffer *old_afb, *new_afb;
10513 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10514 			continue;
10515 
10516 		if (old_other_state->crtc != new_plane_state->crtc &&
10517 		    new_other_state->crtc != new_plane_state->crtc)
10518 			continue;
10519 
10520 		if (old_other_state->crtc != new_other_state->crtc)
10521 			return true;
10522 
10523 		/* Src/dst size and scaling updates. */
10524 		if (old_other_state->src_w != new_other_state->src_w ||
10525 		    old_other_state->src_h != new_other_state->src_h ||
10526 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10527 		    old_other_state->crtc_h != new_other_state->crtc_h)
10528 			return true;
10529 
10530 		/* Rotation / mirroring updates. */
10531 		if (old_other_state->rotation != new_other_state->rotation)
10532 			return true;
10533 
10534 		/* Blending updates. */
10535 		if (old_other_state->pixel_blend_mode !=
10536 		    new_other_state->pixel_blend_mode)
10537 			return true;
10538 
10539 		/* Alpha updates. */
10540 		if (old_other_state->alpha != new_other_state->alpha)
10541 			return true;
10542 
10543 		/* Colorspace changes. */
10544 		if (old_other_state->color_range != new_other_state->color_range ||
10545 		    old_other_state->color_encoding != new_other_state->color_encoding)
10546 			return true;
10547 
10548 		/* Framebuffer checks fall at the end. */
10549 		if (!old_other_state->fb || !new_other_state->fb)
10550 			continue;
10551 
10552 		/* Pixel format changes can require bandwidth updates. */
10553 		if (old_other_state->fb->format != new_other_state->fb->format)
10554 			return true;
10555 
10556 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10557 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10558 
10559 		/* Tiling and DCC changes also require bandwidth updates. */
10560 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10561 		    old_afb->base.modifier != new_afb->base.modifier)
10562 			return true;
10563 	}
10564 
10565 	return false;
10566 }
10567 
10568 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10569 			      struct drm_plane_state *new_plane_state,
10570 			      struct drm_framebuffer *fb)
10571 {
10572 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10573 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10574 	unsigned int pitch;
10575 	bool linear;
10576 
10577 	if (fb->width > new_acrtc->max_cursor_width ||
10578 	    fb->height > new_acrtc->max_cursor_height) {
10579 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10580 				 new_plane_state->fb->width,
10581 				 new_plane_state->fb->height);
10582 		return -EINVAL;
10583 	}
10584 	if (new_plane_state->src_w != fb->width << 16 ||
10585 	    new_plane_state->src_h != fb->height << 16) {
10586 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10587 		return -EINVAL;
10588 	}
10589 
10590 	/* Pitch in pixels */
10591 	pitch = fb->pitches[0] / fb->format->cpp[0];
10592 
10593 	if (fb->width != pitch) {
10594 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10595 				 fb->width, pitch);
10596 		return -EINVAL;
10597 	}
10598 
10599 	switch (pitch) {
10600 	case 64:
10601 	case 128:
10602 	case 256:
10603 		/* FB pitch is supported by cursor plane */
10604 		break;
10605 	default:
10606 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10607 		return -EINVAL;
10608 	}
10609 
10610 	/* Core DRM takes care of checking FB modifiers, so we only need to
10611 	 * check tiling flags when the FB doesn't have a modifier. */
10612 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10613 		if (adev->family < AMDGPU_FAMILY_AI) {
10614 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10615 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10616 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10617 		} else {
10618 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10619 		}
10620 		if (!linear) {
10621 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10622 			return -EINVAL;
10623 		}
10624 	}
10625 
10626 	return 0;
10627 }
10628 
/**
 * dm_update_plane_state() - Add or remove a plane's dc_plane_state
 * @dc: dc instance to create/attach plane state with
 * @state: overall atomic state being checked
 * @plane: DRM plane being updated
 * @old_plane_state: plane state being transitioned away from
 * @new_plane_state: plane state being transitioned to
 * @enable: false on the "remove" pass, true on the "add" pass
 * @lock_and_validation_needed: set to true when this change requires a full
 *          dc validation under the global lock
 *
 * Called twice per plane from atomic check, mirroring dm_update_crtc_state():
 * first with @enable == false to remove changed planes from the dc context,
 * then with @enable == true to create the new dc plane state, fill its
 * attributes and attach it to the owning stream. Cursor planes have no dc
 * plane state; only their FB constraints are validated here.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{

	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;


	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* Cursor planes are handled entirely here: validate the FB only. */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
			drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			return -EINVAL;
		}


		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}


	return ret;
}
10794 
10795 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10796 				       int *src_w, int *src_h)
10797 {
10798 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10799 	case DRM_MODE_ROTATE_90:
10800 	case DRM_MODE_ROTATE_270:
10801 		*src_w = plane_state->src_h >> 16;
10802 		*src_h = plane_state->src_w >> 16;
10803 		break;
10804 	case DRM_MODE_ROTATE_0:
10805 	case DRM_MODE_ROTATE_180:
10806 	default:
10807 		*src_w = plane_state->src_w >> 16;
10808 		*src_h = plane_state->src_h >> 16;
10809 		break;
10810 	}
10811 }
10812 
10813 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10814 				struct drm_crtc *crtc,
10815 				struct drm_crtc_state *new_crtc_state)
10816 {
10817 	struct drm_plane *cursor = crtc->cursor, *underlying;
10818 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10819 	int i;
10820 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10821 	int cursor_src_w, cursor_src_h;
10822 	int underlying_src_w, underlying_src_h;
10823 
10824 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10825 	 * cursor per pipe but it's going to inherit the scaling and
10826 	 * positioning from the underlying pipe. Check the cursor plane's
10827 	 * blending properties match the underlying planes'. */
10828 
10829 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10830 	if (!new_cursor_state || !new_cursor_state->fb) {
10831 		return 0;
10832 	}
10833 
10834 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10835 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10836 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10837 
10838 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10839 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10840 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10841 			continue;
10842 
10843 		/* Ignore disabled planes */
10844 		if (!new_underlying_state->fb)
10845 			continue;
10846 
10847 		dm_get_oriented_plane_size(new_underlying_state,
10848 					   &underlying_src_w, &underlying_src_h);
10849 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10850 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10851 
10852 		if (cursor_scale_w != underlying_scale_w ||
10853 		    cursor_scale_h != underlying_scale_h) {
10854 			drm_dbg_atomic(crtc->dev,
10855 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10856 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10857 			return -EINVAL;
10858 		}
10859 
10860 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10861 		if (new_underlying_state->crtc_x <= 0 &&
10862 		    new_underlying_state->crtc_y <= 0 &&
10863 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10864 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10865 			break;
10866 	}
10867 
10868 	return 0;
10869 }
10870 
10871 #if defined(CONFIG_DRM_AMD_DC_DCN)
10872 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10873 {
10874 	struct drm_connector *connector;
10875 	struct drm_connector_state *conn_state, *old_conn_state;
10876 	struct amdgpu_dm_connector *aconnector = NULL;
10877 	int i;
10878 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10879 		if (!conn_state->crtc)
10880 			conn_state = old_conn_state;
10881 
10882 		if (conn_state->crtc != crtc)
10883 			continue;
10884 
10885 		aconnector = to_amdgpu_dm_connector(connector);
10886 		if (!aconnector->port || !aconnector->mst_port)
10887 			aconnector = NULL;
10888 		else
10889 			break;
10890 	}
10891 
10892 	if (!aconnector)
10893 		return 0;
10894 
10895 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10896 }
10897 #endif
10898 
10899 /**
10900  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10901  * @dev: The DRM device
10902  * @state: The atomic state to commit
10903  *
10904  * Validate that the given atomic state is programmable by DC into hardware.
10905  * This involves constructing a &struct dc_state reflecting the new hardware
10906  * state we wish to commit, then querying DC to see if it is programmable. It's
10907  * important not to modify the existing DC state. Otherwise, atomic_check
10908  * may unexpectedly commit hardware changes.
10909  *
10910  * When validating the DC state, it's important that the right locks are
10911  * acquired. For full updates case which removes/adds/updates streams on one
10912  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10913  * that any such full update commit will wait for completion of any outstanding
10914  * flip using DRMs synchronization events.
10915  *
10916  * Note that DM adds the affected connectors for all CRTCs in state, when that
10917  * might not seem necessary. This is because DC stream creation requires the
10918  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10919  * be possible but non-trivial - a possible TODO item.
10920  *
 * Return: 0 on success, or a negative error code if validation failed.
10922  */
10923 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10924 				  struct drm_atomic_state *state)
10925 {
10926 	struct amdgpu_device *adev = drm_to_adev(dev);
10927 	struct dm_atomic_state *dm_state = NULL;
10928 	struct dc *dc = adev->dm.dc;
10929 	struct drm_connector *connector;
10930 	struct drm_connector_state *old_con_state, *new_con_state;
10931 	struct drm_crtc *crtc;
10932 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10933 	struct drm_plane *plane;
10934 	struct drm_plane_state *old_plane_state, *new_plane_state;
10935 	enum dc_status status;
10936 	int ret, i;
10937 	bool lock_and_validation_needed = false;
10938 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10939 #if defined(CONFIG_DRM_AMD_DC_DCN)
10940 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10941 	struct drm_dp_mst_topology_state *mst_state;
10942 	struct drm_dp_mst_topology_mgr *mgr;
10943 #endif
10944 
10945 	trace_amdgpu_dm_atomic_check_begin(state);
10946 
10947 	ret = drm_atomic_helper_check_modeset(dev, state);
10948 	if (ret) {
10949 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10950 		goto fail;
10951 	}
10952 
10953 	/* Check connector changes */
10954 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10955 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10956 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10957 
10958 		/* Skip connectors that are disabled or part of modeset already. */
10959 		if (!old_con_state->crtc && !new_con_state->crtc)
10960 			continue;
10961 
10962 		if (!new_con_state->crtc)
10963 			continue;
10964 
10965 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10966 		if (IS_ERR(new_crtc_state)) {
10967 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10968 			ret = PTR_ERR(new_crtc_state);
10969 			goto fail;
10970 		}
10971 
10972 		if (dm_old_con_state->abm_level !=
10973 		    dm_new_con_state->abm_level)
10974 			new_crtc_state->connectors_changed = true;
10975 	}
10976 
10977 #if defined(CONFIG_DRM_AMD_DC_DCN)
10978 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10979 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10980 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10981 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10982 				if (ret) {
10983 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10984 					goto fail;
10985 				}
10986 			}
10987 		}
10988 		pre_validate_dsc(state, &dm_state, vars);
10989 	}
10990 #endif
10991 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10992 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10993 
10994 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10995 		    !new_crtc_state->color_mgmt_changed &&
10996 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10997 			dm_old_crtc_state->dsc_force_changed == false)
10998 			continue;
10999 
11000 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11001 		if (ret) {
11002 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11003 			goto fail;
11004 		}
11005 
11006 		if (!new_crtc_state->enable)
11007 			continue;
11008 
11009 		ret = drm_atomic_add_affected_connectors(state, crtc);
11010 		if (ret) {
11011 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11012 			goto fail;
11013 		}
11014 
11015 		ret = drm_atomic_add_affected_planes(state, crtc);
11016 		if (ret) {
11017 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11018 			goto fail;
11019 		}
11020 
11021 		if (dm_old_crtc_state->dsc_force_changed)
11022 			new_crtc_state->mode_changed = true;
11023 	}
11024 
11025 	/*
11026 	 * Add all primary and overlay planes on the CRTC to the state
11027 	 * whenever a plane is enabled to maintain correct z-ordering
11028 	 * and to enable fast surface updates.
11029 	 */
11030 	drm_for_each_crtc(crtc, dev) {
11031 		bool modified = false;
11032 
11033 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11034 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11035 				continue;
11036 
11037 			if (new_plane_state->crtc == crtc ||
11038 			    old_plane_state->crtc == crtc) {
11039 				modified = true;
11040 				break;
11041 			}
11042 		}
11043 
11044 		if (!modified)
11045 			continue;
11046 
11047 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11048 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11049 				continue;
11050 
11051 			new_plane_state =
11052 				drm_atomic_get_plane_state(state, plane);
11053 
11054 			if (IS_ERR(new_plane_state)) {
11055 				ret = PTR_ERR(new_plane_state);
11056 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11057 				goto fail;
11058 			}
11059 		}
11060 	}
11061 
	/* Remove existing planes if they are modified */
11063 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11064 		ret = dm_update_plane_state(dc, state, plane,
11065 					    old_plane_state,
11066 					    new_plane_state,
11067 					    false,
11068 					    &lock_and_validation_needed);
11069 		if (ret) {
11070 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11071 			goto fail;
11072 		}
11073 	}
11074 
11075 	/* Disable all crtcs which require disable */
11076 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11077 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11078 					   old_crtc_state,
11079 					   new_crtc_state,
11080 					   false,
11081 					   &lock_and_validation_needed);
11082 		if (ret) {
11083 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11084 			goto fail;
11085 		}
11086 	}
11087 
11088 	/* Enable all crtcs which require enable */
11089 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11090 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11091 					   old_crtc_state,
11092 					   new_crtc_state,
11093 					   true,
11094 					   &lock_and_validation_needed);
11095 		if (ret) {
11096 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11097 			goto fail;
11098 		}
11099 	}
11100 
11101 	/* Add new/modified planes */
11102 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11103 		ret = dm_update_plane_state(dc, state, plane,
11104 					    old_plane_state,
11105 					    new_plane_state,
11106 					    true,
11107 					    &lock_and_validation_needed);
11108 		if (ret) {
11109 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11110 			goto fail;
11111 		}
11112 	}
11113 
11114 	/* Run this here since we want to validate the streams we created */
11115 	ret = drm_atomic_helper_check_planes(dev, state);
11116 	if (ret) {
11117 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11118 		goto fail;
11119 	}
11120 
11121 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11122 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11123 		if (dm_new_crtc_state->mpo_requested)
11124 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11125 	}
11126 
11127 	/* Check cursor planes scaling */
11128 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11129 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11130 		if (ret) {
11131 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11132 			goto fail;
11133 		}
11134 	}
11135 
11136 	if (state->legacy_cursor_update) {
11137 		/*
11138 		 * This is a fast cursor update coming from the plane update
11139 		 * helper, check if it can be done asynchronously for better
11140 		 * performance.
11141 		 */
11142 		state->async_update =
11143 			!drm_atomic_helper_async_check(dev, state);
11144 
11145 		/*
11146 		 * Skip the remaining global validation if this is an async
11147 		 * update. Cursor updates can be done without affecting
11148 		 * state or bandwidth calcs and this avoids the performance
11149 		 * penalty of locking the private state object and
11150 		 * allocating a new dc_state.
11151 		 */
11152 		if (state->async_update)
11153 			return 0;
11154 	}
11155 
11156 	/* Check scaling and underscan changes*/
11157 	/* TODO Removed scaling changes validation due to inability to commit
11158 	 * new stream into context w\o causing full reset. Need to
11159 	 * decide how to handle.
11160 	 */
11161 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11162 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11163 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11164 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11165 
11166 		/* Skip any modesets/resets */
11167 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11168 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11169 			continue;
11170 
11171 		/* Skip any thing not scale or underscan changes */
11172 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11173 			continue;
11174 
11175 		lock_and_validation_needed = true;
11176 	}
11177 
11178 #if defined(CONFIG_DRM_AMD_DC_DCN)
11179 	/* set the slot info for each mst_state based on the link encoding format */
11180 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11181 		struct amdgpu_dm_connector *aconnector;
11182 		struct drm_connector *connector;
11183 		struct drm_connector_list_iter iter;
11184 		u8 link_coding_cap;
11185 
11186 		if (!mgr->mst_state )
11187 			continue;
11188 
11189 		drm_connector_list_iter_begin(dev, &iter);
11190 		drm_for_each_connector_iter(connector, &iter) {
11191 			int id = connector->index;
11192 
11193 			if (id == mst_state->mgr->conn_base_id) {
11194 				aconnector = to_amdgpu_dm_connector(connector);
11195 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11196 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11197 
11198 				break;
11199 			}
11200 		}
11201 		drm_connector_list_iter_end(&iter);
11202 
11203 	}
11204 #endif
11205 	/**
11206 	 * Streams and planes are reset when there are changes that affect
11207 	 * bandwidth. Anything that affects bandwidth needs to go through
11208 	 * DC global validation to ensure that the configuration can be applied
11209 	 * to hardware.
11210 	 *
11211 	 * We have to currently stall out here in atomic_check for outstanding
11212 	 * commits to finish in this case because our IRQ handlers reference
11213 	 * DRM state directly - we can end up disabling interrupts too early
11214 	 * if we don't.
11215 	 *
11216 	 * TODO: Remove this stall and drop DM state private objects.
11217 	 */
11218 	if (lock_and_validation_needed) {
11219 		ret = dm_atomic_get_state(state, &dm_state);
11220 		if (ret) {
11221 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11222 			goto fail;
11223 		}
11224 
11225 		ret = do_aquire_global_lock(dev, state);
11226 		if (ret) {
11227 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11228 			goto fail;
11229 		}
11230 
11231 #if defined(CONFIG_DRM_AMD_DC_DCN)
11232 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11233 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11234 			goto fail;
11235 		}
11236 
11237 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11238 		if (ret) {
11239 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11240 			goto fail;
11241 		}
11242 #endif
11243 
11244 		/*
11245 		 * Perform validation of MST topology in the state:
11246 		 * We need to perform MST atomic check before calling
11247 		 * dc_validate_global_state(), or there is a chance
11248 		 * to get stuck in an infinite loop and hang eventually.
11249 		 */
11250 		ret = drm_dp_mst_atomic_check(state);
11251 		if (ret) {
11252 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11253 			goto fail;
11254 		}
11255 		status = dc_validate_global_state(dc, dm_state->context, true);
11256 		if (status != DC_OK) {
11257 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11258 				       dc_status_to_str(status), status);
11259 			ret = -EINVAL;
11260 			goto fail;
11261 		}
11262 	} else {
11263 		/*
11264 		 * The commit is a fast update. Fast updates shouldn't change
11265 		 * the DC context, affect global validation, and can have their
11266 		 * commit work done in parallel with other commits not touching
11267 		 * the same resource. If we have a new DC context as part of
11268 		 * the DM atomic state from validation we need to free it and
11269 		 * retain the existing one instead.
11270 		 *
11271 		 * Furthermore, since the DM atomic state only contains the DC
11272 		 * context and can safely be annulled, we can free the state
11273 		 * and clear the associated private object now to free
11274 		 * some memory and avoid a possible use-after-free later.
11275 		 */
11276 
11277 		for (i = 0; i < state->num_private_objs; i++) {
11278 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11279 
11280 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11281 				int j = state->num_private_objs-1;
11282 
11283 				dm_atomic_destroy_state(obj,
11284 						state->private_objs[i].state);
11285 
11286 				/* If i is not at the end of the array then the
11287 				 * last element needs to be moved to where i was
11288 				 * before the array can safely be truncated.
11289 				 */
11290 				if (i != j)
11291 					state->private_objs[i] =
11292 						state->private_objs[j];
11293 
11294 				state->private_objs[j].ptr = NULL;
11295 				state->private_objs[j].state = NULL;
11296 				state->private_objs[j].old_state = NULL;
11297 				state->private_objs[j].new_state = NULL;
11298 
11299 				state->num_private_objs = j;
11300 				break;
11301 			}
11302 		}
11303 	}
11304 
11305 	/* Store the overall update type for use later in atomic check. */
11306 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11307 		struct dm_crtc_state *dm_new_crtc_state =
11308 			to_dm_crtc_state(new_crtc_state);
11309 
11310 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11311 							 UPDATE_TYPE_FULL :
11312 							 UPDATE_TYPE_FAST;
11313 	}
11314 
11315 	/* Must be success */
11316 	WARN_ON(ret);
11317 
11318 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11319 
11320 	return ret;
11321 
11322 fail:
11323 	if (ret == -EDEADLK)
11324 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11325 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11326 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11327 	else
11328 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11329 
11330 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11331 
11332 	return ret;
11333 }
11334 
11335 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11336 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11337 {
11338 	uint8_t dpcd_data;
11339 	bool capable = false;
11340 
11341 	if (amdgpu_dm_connector->dc_link &&
11342 		dm_helpers_dp_read_dpcd(
11343 				NULL,
11344 				amdgpu_dm_connector->dc_link,
11345 				DP_DOWN_STREAM_PORT_COUNT,
11346 				&dpcd_data,
11347 				sizeof(dpcd_data))) {
11348 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
11349 	}
11350 
11351 	return capable;
11352 }
11353 
/*
 * Send one chunk of a CEA extension block to the DMUB firmware EDID parser
 * and interpret the reply.
 *
 * @offset:       byte offset of this chunk within the CEA block
 * @total_length: total length of the CEA block being parsed
 * @data:         pointer to the chunk payload
 * @length:       chunk size, at most DMUB_EDID_CEA_DATA_CHUNK_BYTES
 * @vsdb:         filled in when the firmware reports an AMD VSDB
 *
 * Returns true when the firmware acked the chunk or returned a valid AMD
 * VSDB result; false on oversized chunks, command failure, "no VSDB found",
 * or an unrecognized reply type.
 */
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
		unsigned int offset,
		unsigned int total_length,
		uint8_t *data,
		unsigned int length,
		struct amdgpu_hdmi_vsdb_info *vsdb)
{
	bool res;
	union dmub_rb_cmd cmd;
	struct dmub_cmd_send_edid_cea *input;
	struct dmub_cmd_edid_cea_output *output;

	/* The firmware interface only accepts fixed-size chunks. */
	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	input = &cmd.edid_cea.data.input;

	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
	cmd.edid_cea.header.sub_type = 0;
	/* Payload size excludes the command header itself. */
	cmd.edid_cea.header.payload_bytes =
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
	input->cea_total_length = total_length;
	memcpy(input->payload, data, length);

	/* Synchronous round trip: the reply overwrites cmd in place. */
	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
	if (!res) {
		DRM_ERROR("EDID CEA parser failed\n");
		return false;
	}

	output = &cmd.edid_cea.data.output;

	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
		/* A failed ack is logged but still treated as success by the
		 * `return true` below, so parsing continues with the next
		 * chunk. */
		if (!output->ack.success) {
			DRM_ERROR("EDID CEA ack failed at offset %d\n",
					output->ack.offset);
		}
	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
		if (!output->amd_vsdb.vsdb_found)
			return false;

		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
	} else {
		DRM_WARN("Unknown EDID CEA parser results\n");
		return false;
	}

	return true;
}
11410 
/*
 * Parse a CEA extension block with the DMCU-based EDID parser.
 *
 * The block is streamed to the DMCU in 8-byte chunks.  Each intermediate
 * chunk must be acked by the firmware; after the final chunk the AMD VSDB
 * result (if one was found) is read back into @vsdb_info.
 *
 * Returns true only when the firmware reports an AMD VSDB.
 *
 * NOTE(review): the loop assumes @len is a multiple of 8 (the only caller
 * passes EDID_LENGTH == 128); for other lengths the final iteration would
 * read past the end of @edid_ext — confirm before reusing with other sizes.
 */
static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* send 8 bytes a time */
		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i+8 == len) {
			/* EDID block sent completed, expect result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
			if (res) {
				/* amd vsdb found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* not amd vsdb */
			return false;
		}

		/* check for ack*/
		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
		if (!res)
			return false;
	}

	/* Unreachable for len > 0: the final iteration always returns above. */
	return false;
}
11451 
11452 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11453 		uint8_t *edid_ext, int len,
11454 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11455 {
11456 	int i;
11457 
11458 	/* send extension block to DMCU for parsing */
11459 	for (i = 0; i < len; i += 8) {
11460 		/* send 8 bytes a time */
11461 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11462 			return false;
11463 	}
11464 
11465 	return vsdb_info->freesync_supported;
11466 }
11467 
11468 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11469 		uint8_t *edid_ext, int len,
11470 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11471 {
11472 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11473 
11474 	if (adev->dm.dmub_srv)
11475 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11476 	else
11477 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11478 }
11479 
/*
 * Locate the first CEA extension in @edid and parse it for the AMD VSDB.
 *
 * Returns the index of the CEA extension block (>= 0) when a valid VSDB
 * was found and @vsdb_info populated, -ENODEV otherwise.
 *
 * NOTE(review): the caller (amdgpu_dm_update_freesync_caps) uses this
 * return value to index edid->detailed_timings[], but it is actually the
 * extension-block index — confirm that cross-use is intentional.
 */
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		/* Extension blocks follow the base block at EDID_LENGTH strides. */
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	/* Defensive re-check; always true after the loop above. */
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}
11510 
/*
 * Refresh the connector's FreeSync/VRR capability from @edid.
 *
 * For DP/eDP sinks the EDID detailed-timing descriptors are scanned for a
 * monitor range descriptor; for HDMI sinks the AMD VSDB in the CEA
 * extension is parsed via firmware.  The resulting min/max vfreq is cached
 * on the amdgpu_dm_connector and mirrored into
 * connector->display_info.monitor_range, and the DRM "vrr_capable"
 * property is updated.  A NULL @edid (or missing sink) clears the cached
 * range and reports not-capable.
 */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
					struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	/* Assume not capable until a usable refresh-rate range is found. */
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	/* Prefer the real sink; fall back to the emulated one. */
	sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		/* No EDID/sink: clear every cached range value. */
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;


	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		/* Only scan the EDID if the sink ignores MSA timings. */
		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		/* Range descriptors require EDID 1.2 or newer. */
		if (edid_check_required == true && (edid->version > 1 ||
		   (edid->version == 1 && edid->revision > 1))) {
			/* Scan the four detailed-timing descriptor slots. */
			for (i = 0; i < 4; i++) {

				timing	= &edid->detailed_timings[i];
				data	= &timing->data.other_data;
				range	= &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				/* EDID stores pixel clock in 10 kHz units. */
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			/* A usable VRR window must span more than 10 Hz. */
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10) {

				freesync_capable = true;
			}
		}
	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		/* HDMI: let firmware parse the AMD VSDB from the CEA block. */
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing  = &edid->detailed_timings[i];
			data    = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
11628 
11629 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11630 {
11631 	struct amdgpu_device *adev = drm_to_adev(dev);
11632 	struct dc *dc = adev->dm.dc;
11633 	int i;
11634 
11635 	mutex_lock(&adev->dm.dc_lock);
11636 	if (dc->current_state) {
11637 		for (i = 0; i < dc->current_state->stream_count; ++i)
11638 			dc->current_state->streams[i]
11639 				->triggered_crtc_reset.enabled =
11640 				adev->dm.force_timing_sync;
11641 
11642 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11643 		dc_trigger_sync(dc, dc->current_state);
11644 	}
11645 	mutex_unlock(&adev->dm.dc_lock);
11646 }
11647 
/*
 * DC register-write callback: write @value to MMIO register @address via
 * CGS and emit a tracepoint.  @func_name identifies the caller for
 * diagnostics (unused here outside the optional address check).
 */
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	/* Optional debug guard: reject writes to register offset 0. */
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
11660 
/*
 * DC register-read callback: read MMIO register @address via CGS and emit
 * a tracepoint.  Returns 0 (with an assertion) while a DMUB register
 * offload gather is in progress, since real reads are not valid then.
 */
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	/* Optional debug guard: reject reads of register offset 0. */
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	/* Reads are disallowed while DMUB is batching register writes
	 * (gather in progress without burst-write). */
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}
11685 
11686 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11687 						struct dc_context *ctx,
11688 						uint8_t status_type,
11689 						uint32_t *operation_result)
11690 {
11691 	struct amdgpu_device *adev = ctx->driver_context;
11692 	int return_status = -1;
11693 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11694 
11695 	if (is_cmd_aux) {
11696 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11697 			return_status = p_notify->aux_reply.length;
11698 			*operation_result = p_notify->result;
11699 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11700 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11701 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11702 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11703 		} else {
11704 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11705 		}
11706 	} else {
11707 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11708 			return_status = 0;
11709 			*operation_result = p_notify->sc_status;
11710 		} else {
11711 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11712 		}
11713 	}
11714 
11715 	return return_status;
11716 }
11717 
/*
 * Submit an AUX transaction (@is_cmd_aux) or a SET_CONFIG request through
 * the DMUB firmware and wait synchronously (up to 10 s) for the completion
 * signalled by the notification IRQ handler.
 *
 * On success, AUX read replies are copied back into the caller's payload.
 * The translated status is stored via @operation_result; the return value
 * follows amdgpu_dm_set_dmub_async_sync_status() (reply length / 0 / -1).
 */
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
	unsigned int link_index, void *cmd_payload, void *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	if (is_cmd_aux) {
		dc_process_dmub_aux_transfer_async(ctx->dc,
			link_index, (struct aux_payload *)cmd_payload);
	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
					(struct set_config_cmd_payload *)cmd_payload,
					adev->dm.dmub_notify)) {
		/* NOTE(review): a true return here is reported as SUCCESS
		 * without waiting on dmub_aux_transfer_done — presumably the
		 * notification was filled synchronously; confirm against
		 * dc_process_dmub_set_config_async(). */
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
					(uint32_t *)operation_result);
	}

	/* Wait for the IRQ/notification path to complete the request. */
	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timeout!");
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
				(uint32_t *)operation_result);
	}

	if (is_cmd_aux) {
		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
			struct aux_payload *payload = (struct aux_payload *)cmd_payload;

			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
			/* Copy read data back only on an ACKed, non-empty read. */
			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
				       adev->dm.dmub_notify->aux_reply.length);
			}
		}
	}

	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
			(uint32_t *)operation_result);
}
11760 
11761 /*
11762  * Check whether seamless boot is supported.
11763  *
11764  * So far we only support seamless boot on CHIP_VANGOGH.
11765  * If everything goes well, we may consider expanding
11766  * seamless boot to other ASICs.
11767  */
11768 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11769 {
11770 	switch (adev->asic_type) {
11771 	case CHIP_VANGOGH:
11772 		if (!adev->mman.keep_stolen_vga_memory)
11773 			return true;
11774 		break;
11775 	default:
11776 		break;
11777 	}
11778 
11779 	return false;
11780 }
11781